author     root <root@artemis.panaceas.org>  2015-12-25 15:00:15 +0000
committer  root <root@artemis.panaceas.org>  2015-12-25 15:00:15 +0000
commit     ddd86436f4e3643c04b797f858dab95d5f2e4de9 (patch)
tree       bfe7a780cf9a2f4fc33aec32c82e625e79dece1f /drivers/gpu/drm/nouveau
Diffstat (limited to 'drivers/gpu/drm/nouveau')
381 files changed, 98393 insertions, 0 deletions
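The driver added by this patch is built as a module under the backports CPTCFG_ option namespace (the Makefile below references CPTCFG_DRM_NOUVEAU and CPTCFG_DRM_NOUVEAU_BACKLIGHT). For illustration only, and not part of this commit, a backports .config fragment enabling it might look like the following; the option names come from the Kconfig added below and the values are simply its stated defaults:

# Illustrative .config fragment (assumption, not from this commit):
# option names from the Kconfig below, values are its defaults.
CPTCFG_DRM_NOUVEAU=m
CPTCFG_NOUVEAU_DEBUG=5
CPTCFG_NOUVEAU_DEBUG_DEFAULT=3
CPTCFG_DRM_NOUVEAU_BACKLIGHT=y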
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 0000000..ebde984
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,56 @@
+config DRM_NOUVEAU
+	tristate "Nouveau (nVidia) cards"
+	depends on m
+	depends on DRM && PCI
+	select BACKPORT_FW_LOADER
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	depends on FB_CFB_FILLRECT
+	depends on FB_CFB_COPYAREA
+	depends on FB_CFB_IMAGEBLIT
+	depends on FB
+	select FRAMEBUFFER_CONSOLE if !EXPERT
+	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+	select X86_PLATFORM_DEVICES if ACPI && X86
+	select ACPI_WMI if ACPI && X86
+	select MXM_WMI if ACPI && X86
+	depends on POWER_SUPPLY
+	help
+	  Choose this option for open-source nVidia support.
+
+config NOUVEAU_DEBUG
+	int "Maximum debug level"
+	depends on DRM_NOUVEAU
+	range 0 7
+	default 5
+	help
+	  Selects the maximum debug level to compile support for.
+
+	  0 - fatal
+	  1 - error
+	  2 - warning
+	  3 - info
+	  4 - debug
+	  5 - trace (recommended)
+	  6 - paranoia
+	  7 - spam
+
+	  The paranoia and spam levels will add a lot of extra checks which
+	  may potentially slow down driver operation.
+
+config NOUVEAU_DEBUG_DEFAULT
+	int "Default debug level"
+	depends on DRM_NOUVEAU
+	range 0 7
+	default 3
+	help
+	  Selects the default debug level
+
+config DRM_NOUVEAU_BACKLIGHT
+	bool "Support for backlight control"
+	depends on DRM_NOUVEAU
+	default y
+	help
+	  Say Y here if you want to control the backlight of your display
+	  (e.g. a laptop panel).
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 0000000..7ae4ef8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,247 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+ +ccflags-y += -I$(backport_srctree)/include/drm +ccflags-y += -I$(src)/core/include +ccflags-y += -I$(src)/core +ccflags-y += -I$(src) + +nouveau-y := core/core/client.o +nouveau-y += core/core/engctx.o +nouveau-y += core/core/engine.o +nouveau-y += core/core/enum.o +nouveau-y += core/core/event.o +nouveau-y += core/core/falcon.o +nouveau-y += core/core/gpuobj.o +nouveau-y += core/core/handle.o +nouveau-y += core/core/mm.o +nouveau-y += core/core/namedb.o +nouveau-y += core/core/object.o +nouveau-y += core/core/option.o +nouveau-y += core/core/parent.o +nouveau-y += core/core/printk.o +nouveau-y += core/core/ramht.o +nouveau-y += core/core/subdev.o + +nouveau-y += core/subdev/bar/base.o +nouveau-y += core/subdev/bar/nv50.o +nouveau-y += core/subdev/bar/nvc0.o +nouveau-y += core/subdev/bios/base.o +nouveau-y += core/subdev/bios/bit.o +nouveau-y += core/subdev/bios/conn.o +nouveau-y += core/subdev/bios/dcb.o +nouveau-y += core/subdev/bios/disp.o +nouveau-y += core/subdev/bios/dp.o +nouveau-y += core/subdev/bios/extdev.o +nouveau-y += core/subdev/bios/gpio.o +nouveau-y += core/subdev/bios/i2c.o +nouveau-y += core/subdev/bios/init.o +nouveau-y += core/subdev/bios/mxm.o +nouveau-y += core/subdev/bios/perf.o +nouveau-y += core/subdev/bios/pll.o +nouveau-y += core/subdev/bios/therm.o +nouveau-y += core/subdev/bios/xpio.o +nouveau-y += core/subdev/bus/nv04.o +nouveau-y += core/subdev/bus/nv31.o +nouveau-y += core/subdev/bus/nv50.o +nouveau-y += core/subdev/bus/nvc0.o +nouveau-y += core/subdev/clock/nv04.o +nouveau-y += core/subdev/clock/nv40.o +nouveau-y += core/subdev/clock/nv50.o +nouveau-y += core/subdev/clock/nva3.o +nouveau-y += core/subdev/clock/nvc0.o +nouveau-y += core/subdev/clock/pllnv04.o +nouveau-y += core/subdev/clock/pllnva3.o +nouveau-y += core/subdev/devinit/base.o +nouveau-y += core/subdev/devinit/nv04.o +nouveau-y += core/subdev/devinit/nv05.o +nouveau-y += core/subdev/devinit/nv10.o +nouveau-y += core/subdev/devinit/nv1a.o +nouveau-y += core/subdev/devinit/nv20.o +nouveau-y += core/subdev/devinit/nv50.o +nouveau-y += core/subdev/fb/base.o +nouveau-y += core/subdev/fb/nv04.o +nouveau-y += core/subdev/fb/nv10.o +nouveau-y += core/subdev/fb/nv1a.o +nouveau-y += core/subdev/fb/nv20.o +nouveau-y += core/subdev/fb/nv25.o +nouveau-y += core/subdev/fb/nv30.o +nouveau-y += core/subdev/fb/nv35.o +nouveau-y += core/subdev/fb/nv36.o +nouveau-y += core/subdev/fb/nv40.o +nouveau-y += core/subdev/fb/nv41.o +nouveau-y += core/subdev/fb/nv44.o +nouveau-y += core/subdev/fb/nv46.o +nouveau-y += core/subdev/fb/nv47.o +nouveau-y += core/subdev/fb/nv49.o +nouveau-y += core/subdev/fb/nv4e.o +nouveau-y += core/subdev/fb/nv50.o +nouveau-y += core/subdev/fb/nvc0.o +nouveau-y += core/subdev/gpio/base.o +nouveau-y += core/subdev/gpio/nv10.o +nouveau-y += core/subdev/gpio/nv50.o +nouveau-y += core/subdev/gpio/nvd0.o +nouveau-y += core/subdev/gpio/nve0.o +nouveau-y += core/subdev/i2c/base.o +nouveau-y += core/subdev/i2c/anx9805.o +nouveau-y += core/subdev/i2c/aux.o +nouveau-y += core/subdev/i2c/bit.o +nouveau-y += core/subdev/i2c/nv04.o +nouveau-y += core/subdev/i2c/nv4e.o +nouveau-y += core/subdev/i2c/nv50.o +nouveau-y += core/subdev/i2c/nv94.o +nouveau-y += core/subdev/i2c/nvd0.o +nouveau-y += core/subdev/ibus/nvc0.o +nouveau-y += core/subdev/ibus/nve0.o +nouveau-y += core/subdev/instmem/base.o +nouveau-y += core/subdev/instmem/nv04.o +nouveau-y += core/subdev/instmem/nv40.o +nouveau-y += core/subdev/instmem/nv50.o +nouveau-y += core/subdev/ltcg/nvc0.o +nouveau-y += core/subdev/mc/base.o +nouveau-y += 
core/subdev/mc/nv04.o +nouveau-y += core/subdev/mc/nv44.o +nouveau-y += core/subdev/mc/nv50.o +nouveau-y += core/subdev/mc/nv98.o +nouveau-y += core/subdev/mc/nvc0.o +nouveau-y += core/subdev/mxm/base.o +nouveau-y += core/subdev/mxm/mxms.o +nouveau-y += core/subdev/mxm/nv50.o +nouveau-y += core/subdev/therm/base.o +nouveau-y += core/subdev/therm/fan.o +nouveau-y += core/subdev/therm/fannil.o +nouveau-y += core/subdev/therm/fanpwm.o +nouveau-y += core/subdev/therm/fantog.o +nouveau-y += core/subdev/therm/ic.o +nouveau-y += core/subdev/therm/temp.o +nouveau-y += core/subdev/therm/nv40.o +nouveau-y += core/subdev/therm/nv50.o +nouveau-y += core/subdev/therm/nv84.o +nouveau-y += core/subdev/therm/nva3.o +nouveau-y += core/subdev/therm/nvd0.o +nouveau-y += core/subdev/timer/base.o +nouveau-y += core/subdev/timer/nv04.o +nouveau-y += core/subdev/vm/base.o +nouveau-y += core/subdev/vm/nv04.o +nouveau-y += core/subdev/vm/nv41.o +nouveau-y += core/subdev/vm/nv44.o +nouveau-y += core/subdev/vm/nv50.o +nouveau-y += core/subdev/vm/nvc0.o + +nouveau-y += core/engine/dmaobj/base.o +nouveau-y += core/engine/dmaobj/nv04.o +nouveau-y += core/engine/dmaobj/nv50.o +nouveau-y += core/engine/dmaobj/nvc0.o +nouveau-y += core/engine/dmaobj/nvd0.o +nouveau-y += core/engine/bsp/nv84.o +nouveau-y += core/engine/bsp/nvc0.o +nouveau-y += core/engine/bsp/nve0.o +nouveau-y += core/engine/copy/nva3.o +nouveau-y += core/engine/copy/nvc0.o +nouveau-y += core/engine/copy/nve0.o +nouveau-y += core/engine/crypt/nv84.o +nouveau-y += core/engine/crypt/nv98.o +nouveau-y += core/engine/device/base.o +nouveau-y += core/engine/device/nv04.o +nouveau-y += core/engine/device/nv10.o +nouveau-y += core/engine/device/nv20.o +nouveau-y += core/engine/device/nv30.o +nouveau-y += core/engine/device/nv40.o +nouveau-y += core/engine/device/nv50.o +nouveau-y += core/engine/device/nvc0.o +nouveau-y += core/engine/device/nve0.o +nouveau-y += core/engine/disp/base.o +nouveau-y += core/engine/disp/nv04.o +nouveau-y += core/engine/disp/nv50.o +nouveau-y += core/engine/disp/nv84.o +nouveau-y += core/engine/disp/nv94.o +nouveau-y += core/engine/disp/nva0.o +nouveau-y += core/engine/disp/nva3.o +nouveau-y += core/engine/disp/nvd0.o +nouveau-y += core/engine/disp/nve0.o +nouveau-y += core/engine/disp/nvf0.o +nouveau-y += core/engine/disp/dacnv50.o +nouveau-y += core/engine/disp/dport.o +nouveau-y += core/engine/disp/hdanva3.o +nouveau-y += core/engine/disp/hdanvd0.o +nouveau-y += core/engine/disp/hdminv84.o +nouveau-y += core/engine/disp/hdminva3.o +nouveau-y += core/engine/disp/hdminvd0.o +nouveau-y += core/engine/disp/piornv50.o +nouveau-y += core/engine/disp/sornv50.o +nouveau-y += core/engine/disp/sornv94.o +nouveau-y += core/engine/disp/sornvd0.o +nouveau-y += core/engine/disp/vga.o +nouveau-y += core/engine/fifo/base.o +nouveau-y += core/engine/fifo/nv04.o +nouveau-y += core/engine/fifo/nv10.o +nouveau-y += core/engine/fifo/nv17.o +nouveau-y += core/engine/fifo/nv40.o +nouveau-y += core/engine/fifo/nv50.o +nouveau-y += core/engine/fifo/nv84.o +nouveau-y += core/engine/fifo/nvc0.o +nouveau-y += core/engine/fifo/nve0.o +nouveau-y += core/engine/graph/ctxnv40.o +nouveau-y += core/engine/graph/ctxnv50.o +nouveau-y += core/engine/graph/ctxnvc0.o +nouveau-y += core/engine/graph/ctxnve0.o +nouveau-y += core/engine/graph/nv04.o +nouveau-y += core/engine/graph/nv10.o +nouveau-y += core/engine/graph/nv20.o +nouveau-y += core/engine/graph/nv25.o +nouveau-y += core/engine/graph/nv2a.o +nouveau-y += core/engine/graph/nv30.o +nouveau-y += 
core/engine/graph/nv34.o +nouveau-y += core/engine/graph/nv35.o +nouveau-y += core/engine/graph/nv40.o +nouveau-y += core/engine/graph/nv50.o +nouveau-y += core/engine/graph/nvc0.o +nouveau-y += core/engine/graph/nve0.o +nouveau-y += core/engine/mpeg/nv31.o +nouveau-y += core/engine/mpeg/nv40.o +nouveau-y += core/engine/mpeg/nv50.o +nouveau-y += core/engine/mpeg/nv84.o +nouveau-y += core/engine/ppp/nv98.o +nouveau-y += core/engine/ppp/nvc0.o +nouveau-y += core/engine/software/nv04.o +nouveau-y += core/engine/software/nv10.o +nouveau-y += core/engine/software/nv50.o +nouveau-y += core/engine/software/nvc0.o +nouveau-y += core/engine/vp/nv84.o +nouveau-y += core/engine/vp/nvc0.o +nouveau-y += core/engine/vp/nve0.o + +# drm/core +nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o +nouveau-y += nouveau_vga.o nouveau_agp.o +nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o +nouveau-y += nouveau_prime.o nouveau_abi16.o +nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o +nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o + +# drm/kms +nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o +nouveau-y += nouveau_connector.o nouveau_dp.o +nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o + +# drm/kms/nv04:nv50 +include $(src)/dispnv04/Makefile + +# drm/kms/nv50- +nouveau-y += nv50_display.o + +# drm/pm +nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o +nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o +nouveau-y += nouveau_mem.o + +# other random bits +nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o +ifdef CONFIG_X86 +nouveau-$(CONFIG_ACPI) += nouveau_acpi.o +endif +nouveau-$(CPTCFG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o +nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o + +obj-$(CPTCFG_DRM_NOUVEAU)+= nouveau.o diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c new file mode 100644 index 0000000..9079c0a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/client.c @@ -0,0 +1,112 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/client.h> +#include <core/handle.h> +#include <core/option.h> + +#include <engine/device.h> + +static void +nouveau_client_dtor(struct nouveau_object *object) +{ + struct nouveau_client *client = (void *)object; + nouveau_object_ref(NULL, &client->device); + nouveau_handle_destroy(client->root); + nouveau_namedb_destroy(&client->base); +} + +static struct nouveau_oclass +nouveau_client_oclass = { + .ofuncs = &(struct nouveau_ofuncs) { + .dtor = nouveau_client_dtor, + }, +}; + +int +nouveau_client_create_(const char *name, u64 devname, const char *cfg, + const char *dbg, int length, void **pobject) +{ + struct nouveau_object *device; + struct nouveau_client *client; + int ret; + + device = (void *)nouveau_device_find(devname); + if (!device) + return -ENODEV; + + ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass, + NV_CLIENT_CLASS, NULL, + (1ULL << NVDEV_ENGINE_DEVICE), + length, pobject); + client = *pobject; + if (ret) + return ret; + + ret = nouveau_handle_create(nv_object(client), ~0, ~0, + nv_object(client), &client->root); + if (ret) + return ret; + + /* prevent init/fini being called, os in in charge of this */ + atomic_set(&nv_object(client)->usecount, 2); + + nouveau_object_ref(device, &client->device); + snprintf(client->name, sizeof(client->name), "%s", name); + client->debug = nouveau_dbgopt(dbg, "CLIENT"); + return 0; +} + +int +nouveau_client_init(struct nouveau_client *client) +{ + int ret; + nv_debug(client, "init running\n"); + ret = nouveau_handle_init(client->root); + nv_debug(client, "init completed with %d\n", ret); + return ret; +} + +int +nouveau_client_fini(struct nouveau_client *client, bool suspend) +{ + const char *name[2] = { "fini", "suspend" }; + int ret; + + nv_debug(client, "%s running\n", name[suspend]); + ret = nouveau_handle_fini(client->root, suspend); + nv_debug(client, "%s completed with %d\n", name[suspend], ret); + return ret; +} + +const char * +nouveau_client_name(void *obj) +{ + const char *client_name = "unknown"; + struct nouveau_client *client = nouveau_client(obj); + if (client) + client_name = client->name; + return client_name; +} diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c new file mode 100644 index 0000000..84c71fa --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/engctx.c @@ -0,0 +1,251 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/namedb.h> +#include <core/handle.h> +#include <core/client.h> +#include <core/engctx.h> + +#include <subdev/vm.h> + +static inline int +nouveau_engctx_exists(struct nouveau_object *parent, + struct nouveau_engine *engine, void **pobject) +{ + struct nouveau_engctx *engctx; + struct nouveau_object *parctx; + + list_for_each_entry(engctx, &engine->contexts, head) { + parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS); + if (parctx == parent) { + atomic_inc(&nv_object(engctx)->refcount); + *pobject = engctx; + return 1; + } + } + + return 0; +} + +int +nouveau_engctx_create_(struct nouveau_object *parent, + struct nouveau_object *engobj, + struct nouveau_oclass *oclass, + struct nouveau_object *pargpu, + u32 size, u32 align, u32 flags, + int length, void **pobject) +{ + struct nouveau_client *client = nouveau_client(parent); + struct nouveau_engine *engine = nv_engine(engobj); + struct nouveau_object *engctx; + unsigned long save; + int ret; + + /* check if this engine already has a context for the parent object, + * and reference it instead of creating a new one + */ + spin_lock_irqsave(&engine->lock, save); + ret = nouveau_engctx_exists(parent, engine, pobject); + spin_unlock_irqrestore(&engine->lock, save); + if (ret) + return ret; + + /* create the new context, supports creating both raw objects and + * objects backed by instance memory + */ + if (size) { + ret = nouveau_gpuobj_create_(parent, engobj, oclass, + NV_ENGCTX_CLASS, + pargpu, size, align, flags, + length, pobject); + } else { + ret = nouveau_object_create_(parent, engobj, oclass, + NV_ENGCTX_CLASS, length, pobject); + } + + engctx = *pobject; + if (ret) + return ret; + + /* must take the lock again and re-check a context doesn't already + * exist (in case of a race) - the lock had to be dropped before as + * it's not possible to allocate the object with it held. 
+ */ + spin_lock_irqsave(&engine->lock, save); + ret = nouveau_engctx_exists(parent, engine, pobject); + if (ret) { + spin_unlock_irqrestore(&engine->lock, save); + nouveau_object_ref(NULL, &engctx); + return ret; + } + + if (client->vm) + atomic_inc(&client->vm->engref[nv_engidx(engobj)]); + list_add(&nv_engctx(engctx)->head, &engine->contexts); + nv_engctx(engctx)->addr = ~0ULL; + spin_unlock_irqrestore(&engine->lock, save); + return 0; +} + +void +nouveau_engctx_destroy(struct nouveau_engctx *engctx) +{ + struct nouveau_object *engobj = nv_object(engctx)->engine; + struct nouveau_engine *engine = nv_engine(engobj); + struct nouveau_client *client = nouveau_client(engctx); + unsigned long save; + + nouveau_gpuobj_unmap(&engctx->vma); + spin_lock_irqsave(&engine->lock, save); + list_del(&engctx->head); + spin_unlock_irqrestore(&engine->lock, save); + + if (client->vm) + atomic_dec(&client->vm->engref[nv_engidx(engobj)]); + + if (engctx->base.size) + nouveau_gpuobj_destroy(&engctx->base); + else + nouveau_object_destroy(&engctx->base.base); +} + +int +nouveau_engctx_init(struct nouveau_engctx *engctx) +{ + struct nouveau_object *object = nv_object(engctx); + struct nouveau_subdev *subdev = nv_subdev(object->engine); + struct nouveau_object *parent; + struct nouveau_subdev *pardev; + int ret; + + ret = nouveau_gpuobj_init(&engctx->base); + if (ret) + return ret; + + parent = nv_pclass(object->parent, NV_PARENT_CLASS); + pardev = nv_subdev(parent->engine); + if (nv_parent(parent)->context_attach) { + mutex_lock(&pardev->mutex); + ret = nv_parent(parent)->context_attach(parent, object); + mutex_unlock(&pardev->mutex); + } + + if (ret) { + nv_error(parent, "failed to attach %s context, %d\n", + subdev->name, ret); + return ret; + } + + nv_debug(parent, "attached %s context\n", subdev->name); + return 0; +} + +int +nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend) +{ + struct nouveau_object *object = nv_object(engctx); + struct nouveau_subdev *subdev = nv_subdev(object->engine); + struct nouveau_object *parent; + struct nouveau_subdev *pardev; + int ret = 0; + + parent = nv_pclass(object->parent, NV_PARENT_CLASS); + pardev = nv_subdev(parent->engine); + if (nv_parent(parent)->context_detach) { + mutex_lock(&pardev->mutex); + ret = nv_parent(parent)->context_detach(parent, suspend, object); + mutex_unlock(&pardev->mutex); + } + + if (ret) { + nv_error(parent, "failed to detach %s context, %d\n", + subdev->name, ret); + return ret; + } + + nv_debug(parent, "detached %s context\n", subdev->name); + return nouveau_gpuobj_fini(&engctx->base, suspend); +} + +int +_nouveau_engctx_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_engctx *engctx; + int ret; + + ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256, + NVOBJ_FLAG_ZERO_ALLOC, &engctx); + *pobject = nv_object(engctx); + return ret; +} + +void +_nouveau_engctx_dtor(struct nouveau_object *object) +{ + nouveau_engctx_destroy(nv_engctx(object)); +} + +int +_nouveau_engctx_init(struct nouveau_object *object) +{ + return nouveau_engctx_init(nv_engctx(object)); +} + + +int +_nouveau_engctx_fini(struct nouveau_object *object, bool suspend) +{ + return nouveau_engctx_fini(nv_engctx(object), suspend); +} + +struct nouveau_object * +nouveau_engctx_get(struct nouveau_engine *engine, u64 addr) +{ + struct nouveau_engctx *engctx; + unsigned long flags; + + spin_lock_irqsave(&engine->lock, flags); + 
list_for_each_entry(engctx, &engine->contexts, head) { + if (engctx->addr == addr) { + engctx->save = flags; + return nv_object(engctx); + } + } + spin_unlock_irqrestore(&engine->lock, flags); + return NULL; +} + +void +nouveau_engctx_put(struct nouveau_object *object) +{ + if (object) { + struct nouveau_engine *engine = nv_engine(object->engine); + struct nouveau_engctx *engctx = nv_engctx(object); + spin_unlock_irqrestore(&engine->lock, engctx->save); + } +} diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c new file mode 100644 index 0000000..c8bed4a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/engine.c @@ -0,0 +1,55 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/engine.h> +#include <core/option.h> + +int +nouveau_engine_create_(struct nouveau_object *parent, + struct nouveau_object *engobj, + struct nouveau_oclass *oclass, bool enable, + const char *iname, const char *fname, + int length, void **pobject) +{ + struct nouveau_engine *engine; + int ret; + + ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS, + iname, fname, length, pobject); + engine = *pobject; + if (ret) + return ret; + + if ( parent && + !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) { + if (!enable) + nv_warn(engine, "disabled, %s=1 to enable\n", iname); + return -ENODEV; + } + + INIT_LIST_HEAD(&engine->contexts); + spin_lock_init(&engine->lock); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/core/core/enum.c new file mode 100644 index 0000000..dd43479 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/enum.c @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2010 Nouveau Project + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <core/os.h> +#include <core/enum.h> + +const struct nouveau_enum * +nouveau_enum_find(const struct nouveau_enum *en, u32 value) +{ + while (en->name) { + if (en->value == value) + return en; + en++; + } + + return NULL; +} + +const struct nouveau_enum * +nouveau_enum_print(const struct nouveau_enum *en, u32 value) +{ + en = nouveau_enum_find(en, value); + if (en) + pr_cont("%s", en->name); + else + pr_cont("(unknown enum 0x%08x)", value); + return en; +} + +void +nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) +{ + while (bf->name) { + if (value & bf->mask) { + pr_cont(" %s", bf->name); + value &= ~bf->mask; + } + + bf++; + } + + if (value) + pr_cont(" (unknown bits 0x%08x)", value); +} diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c new file mode 100644 index 0000000..7eb81c1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/event.c @@ -0,0 +1,110 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <core/os.h> +#include <core/event.h> + +static void +nouveau_event_put_locked(struct nouveau_event *event, int index, + struct nouveau_eventh *handler) +{ + if (!--event->index[index].refs) { + if (event->disable) + event->disable(event, index); + } + list_del(&handler->head); +} + +void +nouveau_event_put(struct nouveau_event *event, int index, + struct nouveau_eventh *handler) +{ + unsigned long flags; + + spin_lock_irqsave(&event->lock, flags); + if (index < event->index_nr) + nouveau_event_put_locked(event, index, handler); + spin_unlock_irqrestore(&event->lock, flags); +} + +void +nouveau_event_get(struct nouveau_event *event, int index, + struct nouveau_eventh *handler) +{ + unsigned long flags; + + spin_lock_irqsave(&event->lock, flags); + if (index < event->index_nr) { + list_add(&handler->head, &event->index[index].list); + if (!event->index[index].refs++) { + if (event->enable) + event->enable(event, index); + } + } + spin_unlock_irqrestore(&event->lock, flags); +} + +void +nouveau_event_trigger(struct nouveau_event *event, int index) +{ + struct nouveau_eventh *handler, *temp; + unsigned long flags; + + if (index >= event->index_nr) + return; + + spin_lock_irqsave(&event->lock, flags); + list_for_each_entry_safe(handler, temp, &event->index[index].list, head) { + if (handler->func(handler, index) == NVKM_EVENT_DROP) { + nouveau_event_put_locked(event, index, handler); + } + } + spin_unlock_irqrestore(&event->lock, flags); +} + +void +nouveau_event_destroy(struct nouveau_event **pevent) +{ + struct nouveau_event *event = *pevent; + if (event) { + kfree(event); + *pevent = NULL; + } +} + +int +nouveau_event_create(int index_nr, struct nouveau_event **pevent) +{ + struct nouveau_event *event; + int i; + + event = *pevent = kzalloc(sizeof(*event) + index_nr * + sizeof(event->index[0]), GFP_KERNEL); + if (!event) + return -ENOMEM; + + spin_lock_init(&event->lock); + for (i = 0; i < index_nr; i++) + INIT_LIST_HEAD(&event->index[i].list); + event->index_nr = index_nr; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c new file mode 100644 index 0000000..e05c157 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/falcon.c @@ -0,0 +1,250 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <core/falcon.h> + +#include <subdev/timer.h> + +u32 +_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) +{ + struct nouveau_falcon *falcon = (void *)object; + return nv_rd32(falcon, falcon->addr + addr); +} + +void +_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nouveau_falcon *falcon = (void *)object; + nv_wr32(falcon, falcon->addr + addr, data); +} + +int +_nouveau_falcon_init(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nouveau_falcon *falcon = (void *)object; + const struct firmware *fw; + char name[32] = "internal"; + int ret, i; + u32 caps; + + /* enable engine, and determine its capabilities */ + ret = nouveau_engine_init(&falcon->base); + if (ret) + return ret; + + if (device->chipset < 0xa3 || + device->chipset == 0xaa || device->chipset == 0xac) { + falcon->version = 0; + falcon->secret = (falcon->addr == 0x087000) ? 1 : 0; + } else { + caps = nv_ro32(falcon, 0x12c); + falcon->version = (caps & 0x0000000f); + falcon->secret = (caps & 0x00000030) >> 4; + } + + caps = nv_ro32(falcon, 0x108); + falcon->code.limit = (caps & 0x000001ff) << 8; + falcon->data.limit = (caps & 0x0003fe00) >> 1; + + nv_debug(falcon, "falcon version: %d\n", falcon->version); + nv_debug(falcon, "secret level: %d\n", falcon->secret); + nv_debug(falcon, "code limit: %d\n", falcon->code.limit); + nv_debug(falcon, "data limit: %d\n", falcon->data.limit); + + /* wait for 'uc halted' to be signalled before continuing */ + if (falcon->secret && falcon->version < 4) { + if (!falcon->version) + nv_wait(falcon, 0x008, 0x00000010, 0x00000010); + else + nv_wait(falcon, 0x180, 0x80000000, 0); + nv_wo32(falcon, 0x004, 0x00000010); + } + + /* disable all interrupts */ + nv_wo32(falcon, 0x014, 0xffffffff); + + /* no default ucode provided by the engine implementation, try and + * locate a "self-bootstrapping" firmware image for the engine + */ + if (!falcon->code.data) { + snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x", + device->chipset, falcon->addr >> 12); + + ret = request_firmware(&fw, name, &device->pdev->dev); + if (ret == 0) { + falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); + falcon->code.size = fw->size; + falcon->data.data = NULL; + falcon->data.size = 0; + release_firmware(fw); + } + + falcon->external = true; + } + + /* next step is to try and load "static code/data segment" firmware + * images for the engine + */ + if (!falcon->code.data) { + snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd", + device->chipset, falcon->addr >> 12); + + ret = request_firmware(&fw, name, &device->pdev->dev); + if (ret) { + nv_error(falcon, "unable to load firmware data\n"); + return ret; + } + + falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL); + falcon->data.size = fw->size; + release_firmware(fw); + if (!falcon->data.data) + return -ENOMEM; + + snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc", + device->chipset, falcon->addr >> 12); + + ret = request_firmware(&fw, name, &device->pdev->dev); + if (ret) { + nv_error(falcon, "unable to load firmware code\n"); + return ret; + } + + falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); + falcon->code.size = fw->size; + release_firmware(fw); + if (!falcon->code.data) + return -ENOMEM; + } + + nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ? 
+ "static code/data segments" : "self-bootstrapping"); + + /* ensure any "self-bootstrapping" firmware image is in vram */ + if (!falcon->data.data && !falcon->core) { + ret = nouveau_gpuobj_new(object->parent, NULL, + falcon->code.size, 256, 0, + &falcon->core); + if (ret) { + nv_error(falcon, "core allocation failed, %d\n", ret); + return ret; + } + + for (i = 0; i < falcon->code.size; i += 4) + nv_wo32(falcon->core, i, falcon->code.data[i / 4]); + } + + /* upload firmware bootloader (or the full code segments) */ + if (falcon->core) { + if (device->card_type < NV_C0) + nv_wo32(falcon, 0x618, 0x04000000); + else + nv_wo32(falcon, 0x618, 0x00000114); + nv_wo32(falcon, 0x11c, 0); + nv_wo32(falcon, 0x110, falcon->core->addr >> 8); + nv_wo32(falcon, 0x114, 0); + nv_wo32(falcon, 0x118, 0x00006610); + } else { + if (falcon->code.size > falcon->code.limit || + falcon->data.size > falcon->data.limit) { + nv_error(falcon, "ucode exceeds falcon limit(s)\n"); + return -EINVAL; + } + + if (falcon->version < 3) { + nv_wo32(falcon, 0xff8, 0x00100000); + for (i = 0; i < falcon->code.size / 4; i++) + nv_wo32(falcon, 0xff4, falcon->code.data[i]); + } else { + nv_wo32(falcon, 0x180, 0x01000000); + for (i = 0; i < falcon->code.size / 4; i++) { + if ((i & 0x3f) == 0) + nv_wo32(falcon, 0x188, i >> 6); + nv_wo32(falcon, 0x184, falcon->code.data[i]); + } + } + } + + /* upload data segment (if necessary), zeroing the remainder */ + if (falcon->version < 3) { + nv_wo32(falcon, 0xff8, 0x00000000); + for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) + nv_wo32(falcon, 0xff4, falcon->data.data[i]); + for (; i < falcon->data.limit; i += 4) + nv_wo32(falcon, 0xff4, 0x00000000); + } else { + nv_wo32(falcon, 0x1c0, 0x01000000); + for (i = 0; !falcon->core && i < falcon->data.size / 4; i++) + nv_wo32(falcon, 0x1c4, falcon->data.data[i]); + for (; i < falcon->data.limit / 4; i++) + nv_wo32(falcon, 0x1c4, 0x00000000); + } + + /* start it running */ + nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */ + nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */ + nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */ + nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */ + return 0; +} + +int +_nouveau_falcon_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_falcon *falcon = (void *)object; + + if (!suspend) { + nouveau_gpuobj_ref(NULL, &falcon->core); + if (falcon->external) { + kfree(falcon->data.data); + kfree(falcon->code.data); + falcon->code.data = NULL; + } + } + + nv_mo32(falcon, 0x048, 0x00000003, 0x00000000); + nv_wo32(falcon, 0x014, 0xffffffff); + + return nouveau_engine_fini(&falcon->base, suspend); +} + +int +nouveau_falcon_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 addr, bool enable, + const char *iname, const char *fname, + int length, void **pobject) +{ + struct nouveau_falcon *falcon; + int ret; + + ret = nouveau_engine_create_(parent, engine, oclass, enable, iname, + fname, length, pobject); + falcon = *pobject; + if (ret) + return ret; + + falcon->addr = addr; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c new file mode 100644 index 0000000..560b221 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c @@ -0,0 +1,323 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/gpuobj.h> + +#include <subdev/instmem.h> +#include <subdev/bar.h> +#include <subdev/vm.h> + +void +nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj) +{ + int i; + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) { + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0x00000000); + } + + if (gpuobj->node) { + nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap, + &gpuobj->node); + } + + if (gpuobj->heap.block_size) + nouveau_mm_fini(&gpuobj->heap); + + nouveau_object_destroy(&gpuobj->base); +} + +int +nouveau_gpuobj_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 pclass, + struct nouveau_object *pargpu, + u32 size, u32 align, u32 flags, + int length, void **pobject) +{ + struct nouveau_instmem *imem = nouveau_instmem(parent); + struct nouveau_bar *bar = nouveau_bar(parent); + struct nouveau_gpuobj *gpuobj; + struct nouveau_mm *heap = NULL; + int ret, i; + u64 addr; + + *pobject = NULL; + + if (pargpu) { + while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) { + if (nv_gpuobj(pargpu)->heap.block_size) + break; + pargpu = pargpu->parent; + } + + if (unlikely(pargpu == NULL)) { + nv_error(parent, "no gpuobj heap\n"); + return -EINVAL; + } + + addr = nv_gpuobj(pargpu)->addr; + heap = &nv_gpuobj(pargpu)->heap; + atomic_inc(&parent->refcount); + } else { + ret = imem->alloc(imem, parent, size, align, &parent); + pargpu = parent; + if (ret) + return ret; + + addr = nv_memobj(pargpu)->addr; + size = nv_memobj(pargpu)->size; + + if (bar && bar->alloc) { + struct nouveau_instobj *iobj = (void *)parent; + struct nouveau_mem **mem = (void *)(iobj + 1); + struct nouveau_mem *node = *mem; + if (!bar->alloc(bar, parent, node, &pargpu)) { + nouveau_object_ref(NULL, &parent); + parent = pargpu; + } + } + } + + ret = nouveau_object_create_(parent, engine, oclass, pclass | + NV_GPUOBJ_CLASS, length, pobject); + nouveau_object_ref(NULL, &parent); + gpuobj = *pobject; + if (ret) + return ret; + + gpuobj->parent = pargpu; + gpuobj->flags = flags; + gpuobj->addr = addr; + gpuobj->size = size; + + if (heap) { + ret = nouveau_mm_head(heap, 1, size, size, + max(align, (u32)1), &gpuobj->node); + if (ret) + return ret; + + gpuobj->addr += gpuobj->node->offset; + } + + if (gpuobj->flags & NVOBJ_FLAG_HEAP) { + ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1); + if (ret) + return ret; + } + + if 
(flags & NVOBJ_FLAG_ZERO_ALLOC) { + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0x00000000); + } + + return ret; +} + +struct nouveau_gpuobj_class { + struct nouveau_object *pargpu; + u64 size; + u32 align; + u32 flags; +}; + +static int +_nouveau_gpuobj_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj_class *args = data; + struct nouveau_gpuobj *object; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu, + args->size, args->align, args->flags, + &object); + *pobject = nv_object(object); + if (ret) + return ret; + + return 0; +} + +void +_nouveau_gpuobj_dtor(struct nouveau_object *object) +{ + nouveau_gpuobj_destroy(nv_gpuobj(object)); +} + +int +_nouveau_gpuobj_init(struct nouveau_object *object) +{ + return nouveau_gpuobj_init(nv_gpuobj(object)); +} + +int +_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend) +{ + return nouveau_gpuobj_fini(nv_gpuobj(object), suspend); +} + +u32 +_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr) +{ + struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); + struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); + if (gpuobj->node) + addr += gpuobj->node->offset; + return pfuncs->rd32(gpuobj->parent, addr); +} + +void +_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); + struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); + if (gpuobj->node) + addr += gpuobj->node->offset; + pfuncs->wr32(gpuobj->parent, addr, data); +} + +static struct nouveau_oclass +_nouveau_gpuobj_oclass = { + .handle = 0x00000000, + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_gpuobj_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, + }, +}; + +int +nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu, + u32 size, u32 align, u32 flags, + struct nouveau_gpuobj **pgpuobj) +{ + struct nouveau_object *engine = parent; + struct nouveau_gpuobj_class args = { + .pargpu = pargpu, + .size = size, + .align = align, + .flags = flags, + }; + + if (!nv_iclass(engine, NV_SUBDEV_CLASS)) + engine = engine->engine; + BUG_ON(engine == NULL); + + return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass, + &args, sizeof(args), + (struct nouveau_object **)pgpuobj); +} + +int +nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access, + struct nouveau_vma *vma) +{ + struct nouveau_bar *bar = nouveau_bar(gpuobj); + int ret = -EINVAL; + + if (bar && bar->umap) { + struct nouveau_instobj *iobj = (void *) + nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); + struct nouveau_mem **mem = (void *)(iobj + 1); + ret = bar->umap(bar, *mem, access, vma); + } + + return ret; +} + +int +nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm, + u32 access, struct nouveau_vma *vma) +{ + struct nouveau_instobj *iobj = (void *) + nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS); + struct nouveau_mem **mem = (void *)(iobj + 1); + int ret; + + ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma); + if (ret) + return ret; + + nouveau_vm_map(vma, *mem); + return 0; +} + +void +nouveau_gpuobj_unmap(struct nouveau_vma *vma) +{ + if (vma->node) { + nouveau_vm_unmap(vma); + nouveau_vm_put(vma); + } +} + +/* the below is basically only here to support sharing the 
paged dma object + * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work + * anywhere else. + */ + +static void +nouveau_gpudup_dtor(struct nouveau_object *object) +{ + struct nouveau_gpuobj *gpuobj = (void *)object; + nouveau_object_ref(NULL, &gpuobj->parent); + nouveau_object_destroy(&gpuobj->base); +} + +static struct nouveau_oclass +nouveau_gpudup_oclass = { + .handle = NV_GPUOBJ_CLASS, + .ofuncs = &(struct nouveau_ofuncs) { + .dtor = nouveau_gpudup_dtor, + .init = nouveau_object_init, + .fini = nouveau_object_fini, + }, +}; + +int +nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base, + struct nouveau_gpuobj **pgpuobj) +{ + struct nouveau_gpuobj *gpuobj; + int ret; + + ret = nouveau_object_create(parent, parent->engine, + &nouveau_gpudup_oclass, 0, &gpuobj); + *pgpuobj = gpuobj; + if (ret) + return ret; + + nouveau_object_ref(nv_object(base), &gpuobj->parent); + gpuobj->addr = base->addr; + gpuobj->size = base->size; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c new file mode 100644 index 0000000..264c2b3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/handle.c @@ -0,0 +1,226 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/handle.h> +#include <core/client.h> + +#define hprintk(h,l,f,a...) do { \ + struct nouveau_client *c = nouveau_client((h)->object); \ + struct nouveau_handle *p = (h)->parent; u32 n = p ? 
p->name : ~0; \ + nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \ +} while(0) + +int +nouveau_handle_init(struct nouveau_handle *handle) +{ + struct nouveau_handle *item; + int ret; + + hprintk(handle, TRACE, "init running\n"); + ret = nouveau_object_inc(handle->object); + if (ret) + return ret; + + hprintk(handle, TRACE, "init children\n"); + list_for_each_entry(item, &handle->tree, head) { + ret = nouveau_handle_init(item); + if (ret) + goto fail; + } + + hprintk(handle, TRACE, "init completed\n"); + return 0; +fail: + hprintk(handle, ERROR, "init failed with %d\n", ret); + list_for_each_entry_continue_reverse(item, &handle->tree, head) { + nouveau_handle_fini(item, false); + } + + nouveau_object_dec(handle->object, false); + return ret; +} + +int +nouveau_handle_fini(struct nouveau_handle *handle, bool suspend) +{ + static char *name[2] = { "fini", "suspend" }; + struct nouveau_handle *item; + int ret; + + hprintk(handle, TRACE, "%s children\n", name[suspend]); + list_for_each_entry(item, &handle->tree, head) { + ret = nouveau_handle_fini(item, suspend); + if (ret && suspend) + goto fail; + } + + hprintk(handle, TRACE, "%s running\n", name[suspend]); + if (handle->object) { + ret = nouveau_object_dec(handle->object, suspend); + if (ret && suspend) + goto fail; + } + + hprintk(handle, TRACE, "%s completed\n", name[suspend]); + return 0; +fail: + hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret); + list_for_each_entry_continue_reverse(item, &handle->tree, head) { + int rret = nouveau_handle_init(item); + if (rret) + hprintk(handle, FATAL, "failed to restart, %d\n", rret); + } + + return ret; +} + +int +nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, + struct nouveau_object *object, + struct nouveau_handle **phandle) +{ + struct nouveau_object *namedb; + struct nouveau_handle *handle; + int ret; + + namedb = parent; + while (!nv_iclass(namedb, NV_NAMEDB_CLASS)) + namedb = namedb->parent; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + INIT_LIST_HEAD(&handle->head); + INIT_LIST_HEAD(&handle->tree); + handle->name = _handle; + handle->priv = ~0; + + ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle); + if (ret) { + kfree(handle); + return ret; + } + + if (nv_parent(parent)->object_attach) { + ret = nv_parent(parent)->object_attach(parent, object, _handle); + if (ret < 0) { + nouveau_handle_destroy(handle); + return ret; + } + + handle->priv = ret; + } + + if (object != namedb) { + while (!nv_iclass(namedb, NV_CLIENT_CLASS)) + namedb = namedb->parent; + + handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent); + if (handle->parent) { + list_add(&handle->head, &handle->parent->tree); + nouveau_namedb_put(handle->parent); + } + } + + hprintk(handle, TRACE, "created\n"); + + *phandle = handle; + + return 0; +} + +void +nouveau_handle_destroy(struct nouveau_handle *handle) +{ + struct nouveau_handle *item, *temp; + + hprintk(handle, TRACE, "destroy running\n"); + list_for_each_entry_safe(item, temp, &handle->tree, head) { + nouveau_handle_destroy(item); + } + list_del(&handle->head); + + if (handle->priv != ~0) { + struct nouveau_object *parent = handle->parent->object; + nv_parent(parent)->object_detach(parent, handle->priv); + } + + hprintk(handle, TRACE, "destroy completed\n"); + nouveau_namedb_remove(handle); + kfree(handle); +} + +struct nouveau_object * +nouveau_handle_ref(struct nouveau_object *parent, u32 name) +{ + struct nouveau_object *object = NULL; + struct 
nouveau_handle *handle; + + while (!nv_iclass(parent, NV_NAMEDB_CLASS)) + parent = parent->parent; + + handle = nouveau_namedb_get(nv_namedb(parent), name); + if (handle) { + nouveau_object_ref(handle->object, &object); + nouveau_namedb_put(handle); + } + + return object; +} + +struct nouveau_handle * +nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass) +{ + struct nouveau_namedb *namedb; + if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) + return nouveau_namedb_get_class(namedb, oclass); + return NULL; +} + +struct nouveau_handle * +nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst) +{ + struct nouveau_namedb *namedb; + if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) + return nouveau_namedb_get_vinst(namedb, vinst); + return NULL; +} + +struct nouveau_handle * +nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst) +{ + struct nouveau_namedb *namedb; + if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS))) + return nouveau_namedb_get_cinst(namedb, cinst); + return NULL; +} + +void +nouveau_handle_put(struct nouveau_handle *handle) +{ + if (handle) + nouveau_namedb_put(handle); +} diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c new file mode 100644 index 0000000..0261a11 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/mm.c @@ -0,0 +1,251 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "core/os.h" +#include "core/mm.h" + +#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? 
NULL : \ + list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) + +void +nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis) +{ + struct nouveau_mm_node *this = *pthis; + + if (this) { + struct nouveau_mm_node *prev = node(this, prev); + struct nouveau_mm_node *next = node(this, next); + + if (prev && prev->type == 0) { + prev->length += this->length; + list_del(&this->nl_entry); + kfree(this); this = prev; + } + + if (next && next->type == 0) { + next->offset = this->offset; + next->length += this->length; + if (this->type == 0) + list_del(&this->fl_entry); + list_del(&this->nl_entry); + kfree(this); this = NULL; + } + + if (this && this->type != 0) { + list_for_each_entry(prev, &mm->free, fl_entry) { + if (this->offset < prev->offset) + break; + } + + list_add_tail(&this->fl_entry, &prev->fl_entry); + this->type = 0; + } + } + + *pthis = NULL; +} + +static struct nouveau_mm_node * +region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) +{ + struct nouveau_mm_node *b; + + if (a->length == size) + return a; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (unlikely(b == NULL)) + return NULL; + + b->offset = a->offset; + b->length = size; + b->type = a->type; + a->offset += size; + a->length -= size; + list_add_tail(&b->nl_entry, &a->nl_entry); + if (b->type == 0) + list_add_tail(&b->fl_entry, &a->fl_entry); + return b; +} + +int +nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, + u32 align, struct nouveau_mm_node **pnode) +{ + struct nouveau_mm_node *prev, *this, *next; + u32 mask = align - 1; + u32 splitoff; + u32 s, e; + + list_for_each_entry(this, &mm->free, fl_entry) { + e = this->offset + this->length; + s = this->offset; + + prev = node(this, prev); + if (prev && prev->type != type) + s = roundup(s, mm->block_size); + + next = node(this, next); + if (next && next->type != type) + e = rounddown(e, mm->block_size); + + s = (s + mask) & ~mask; + e &= ~mask; + if (s > e || e - s < size_min) + continue; + + splitoff = s - this->offset; + if (splitoff && !region_head(mm, this, splitoff)) + return -ENOMEM; + + this = region_head(mm, this, min(size_max, e - s)); + if (!this) + return -ENOMEM; + + this->type = type; + list_del(&this->fl_entry); + *pnode = this; + return 0; + } + + return -ENOSPC; +} + +static struct nouveau_mm_node * +region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) +{ + struct nouveau_mm_node *b; + + if (a->length == size) + return a; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (unlikely(b == NULL)) + return NULL; + + a->length -= size; + b->offset = a->offset + a->length; + b->length = size; + b->type = a->type; + + list_add(&b->nl_entry, &a->nl_entry); + if (b->type == 0) + list_add(&b->fl_entry, &a->fl_entry); + return b; +} + +int +nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, + u32 align, struct nouveau_mm_node **pnode) +{ + struct nouveau_mm_node *prev, *this, *next; + u32 mask = align - 1; + + list_for_each_entry_reverse(this, &mm->free, fl_entry) { + u32 e = this->offset + this->length; + u32 s = this->offset; + u32 c = 0, a; + + prev = node(this, prev); + if (prev && prev->type != type) + s = roundup(s, mm->block_size); + + next = node(this, next); + if (next && next->type != type) { + e = rounddown(e, mm->block_size); + c = next->offset - e; + } + + s = (s + mask) & ~mask; + a = e - s; + if (s > e || a < size_min) + continue; + + a = min(a, size_max); + s = (e - a) & ~mask; + c += (e - s) - a; + + if (c && !region_tail(mm, this, c)) + 
return -ENOMEM; + + this = region_tail(mm, this, a); + if (!this) + return -ENOMEM; + + this->type = type; + list_del(&this->fl_entry); + *pnode = this; + return 0; + } + + return -ENOSPC; +} + +int +nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block) +{ + struct nouveau_mm_node *node; + + if (block) { + mutex_init(&mm->mutex); + INIT_LIST_HEAD(&mm->nodes); + INIT_LIST_HEAD(&mm->free); + mm->block_size = block; + mm->heap_nodes = 0; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + if (length) { + node->offset = roundup(offset, mm->block_size); + node->length = rounddown(offset + length, mm->block_size); + node->length -= node->offset; + } + + list_add_tail(&node->nl_entry, &mm->nodes); + list_add_tail(&node->fl_entry, &mm->free); + mm->heap_nodes++; + return 0; +} + +int +nouveau_mm_fini(struct nouveau_mm *mm) +{ + if (nouveau_mm_initialised(mm)) { + struct nouveau_mm_node *node, *heap = + list_first_entry(&mm->nodes, typeof(*heap), nl_entry); + int nodes = 0; + + list_for_each_entry(node, &mm->nodes, nl_entry) { + if (WARN_ON(nodes++ == mm->heap_nodes)) + return -EBUSY; + } + + kfree(heap); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c new file mode 100644 index 0000000..1ce95a8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/namedb.c @@ -0,0 +1,203 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/namedb.h> +#include <core/handle.h> +#include <core/gpuobj.h> + +static struct nouveau_handle * +nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name) +{ + struct nouveau_handle *handle; + + list_for_each_entry(handle, &namedb->list, node) { + if (handle->name == name) + return handle; + } + + return NULL; +} + +static struct nouveau_handle * +nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass) +{ + struct nouveau_handle *handle; + + list_for_each_entry(handle, &namedb->list, node) { + if (nv_mclass(handle->object) == oclass) + return handle; + } + + return NULL; +} + +static struct nouveau_handle * +nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst) +{ + struct nouveau_handle *handle; + + list_for_each_entry(handle, &namedb->list, node) { + if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { + if (nv_gpuobj(handle->object)->addr == vinst) + return handle; + } + } + + return NULL; +} + +static struct nouveau_handle * +nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst) +{ + struct nouveau_handle *handle; + + list_for_each_entry(handle, &namedb->list, node) { + if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { + if (nv_gpuobj(handle->object)->node && + nv_gpuobj(handle->object)->node->offset == cinst) + return handle; + } + } + + return NULL; +} + +int +nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name, + struct nouveau_object *object, + struct nouveau_handle *handle) +{ + int ret = -EEXIST; + write_lock_irq(&namedb->lock); + if (!nouveau_namedb_lookup(namedb, name)) { + nouveau_object_ref(object, &handle->object); + handle->namedb = namedb; + list_add(&handle->node, &namedb->list); + ret = 0; + } + write_unlock_irq(&namedb->lock); + return ret; +} + +void +nouveau_namedb_remove(struct nouveau_handle *handle) +{ + struct nouveau_namedb *namedb = handle->namedb; + struct nouveau_object *object = handle->object; + write_lock_irq(&namedb->lock); + list_del(&handle->node); + write_unlock_irq(&namedb->lock); + nouveau_object_ref(NULL, &object); +} + +struct nouveau_handle * +nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name) +{ + struct nouveau_handle *handle; + read_lock(&namedb->lock); + handle = nouveau_namedb_lookup(namedb, name); + if (handle == NULL) + read_unlock(&namedb->lock); + return handle; +} + +struct nouveau_handle * +nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass) +{ + struct nouveau_handle *handle; + read_lock(&namedb->lock); + handle = nouveau_namedb_lookup_class(namedb, oclass); + if (handle == NULL) + read_unlock(&namedb->lock); + return handle; +} + +struct nouveau_handle * +nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst) +{ + struct nouveau_handle *handle; + read_lock(&namedb->lock); + handle = nouveau_namedb_lookup_vinst(namedb, vinst); + if (handle == NULL) + read_unlock(&namedb->lock); + return handle; +} + +struct nouveau_handle * +nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst) +{ + struct nouveau_handle *handle; + read_lock(&namedb->lock); + handle = nouveau_namedb_lookup_cinst(namedb, cinst); + if (handle == NULL) + read_unlock(&namedb->lock); + return handle; +} + +void +nouveau_namedb_put(struct nouveau_handle *handle) +{ + if (handle) + read_unlock(&handle->namedb->lock); +} + +int +nouveau_namedb_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 pclass, + struct nouveau_oclass *sclass, u32 
engcls, + int length, void **pobject) +{ + struct nouveau_namedb *namedb; + int ret; + + ret = nouveau_parent_create_(parent, engine, oclass, pclass | + NV_NAMEDB_CLASS, sclass, engcls, + length, pobject); + namedb = *pobject; + if (ret) + return ret; + + rwlock_init(&namedb->lock); + INIT_LIST_HEAD(&namedb->list); + return 0; +} + +int +_nouveau_namedb_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_namedb *object; + int ret; + + ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object); + *pobject = nv_object(object); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c new file mode 100644 index 0000000..7f48e28 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/object.c @@ -0,0 +1,474 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/parent.h> +#include <core/namedb.h> +#include <core/handle.h> +#include <core/engine.h> + +#ifdef NOUVEAU_OBJECT_MAGIC +static struct list_head _objlist = LIST_HEAD_INIT(_objlist); +static DEFINE_SPINLOCK(_objlist_lock); +#endif + +int +nouveau_object_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 pclass, + int size, void **pobject) +{ + struct nouveau_object *object; + + object = *pobject = kzalloc(size, GFP_KERNEL); + if (!object) + return -ENOMEM; + + nouveau_object_ref(parent, &object->parent); + nouveau_object_ref(engine, &object->engine); + object->oclass = oclass; + object->oclass->handle |= pclass; + atomic_set(&object->refcount, 1); + atomic_set(&object->usecount, 0); + +#ifdef NOUVEAU_OBJECT_MAGIC + object->_magic = NOUVEAU_OBJECT_MAGIC; + spin_lock(&_objlist_lock); + list_add(&object->list, &_objlist); + spin_unlock(&_objlist_lock); +#endif + return 0; +} + +static int +_nouveau_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_object *object; + int ret; + + ret = nouveau_object_create(parent, engine, oclass, 0, &object); + *pobject = nv_object(object); + if (ret) + return ret; + + return 0; +} + +void +nouveau_object_destroy(struct nouveau_object *object) +{ +#ifdef NOUVEAU_OBJECT_MAGIC + spin_lock(&_objlist_lock); + list_del(&object->list); + spin_unlock(&_objlist_lock); +#endif + nouveau_object_ref(NULL, &object->engine); + nouveau_object_ref(NULL, &object->parent); + kfree(object); +} + +static void +_nouveau_object_dtor(struct nouveau_object *object) +{ + nouveau_object_destroy(object); +} + +int +nouveau_object_init(struct nouveau_object *object) +{ + return 0; +} + +static int +_nouveau_object_init(struct nouveau_object *object) +{ + return nouveau_object_init(object); +} + +int +nouveau_object_fini(struct nouveau_object *object, bool suspend) +{ + return 0; +} + +static int +_nouveau_object_fini(struct nouveau_object *object, bool suspend) +{ + return nouveau_object_fini(object, suspend); +} + +struct nouveau_ofuncs +nouveau_object_ofuncs = { + .ctor = _nouveau_object_ctor, + .dtor = _nouveau_object_dtor, + .init = _nouveau_object_init, + .fini = _nouveau_object_fini, +}; + +int +nouveau_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_ofuncs *ofuncs = oclass->ofuncs; + struct nouveau_object *object = NULL; + int ret; + + ret = ofuncs->ctor(parent, engine, oclass, data, size, &object); + *pobject = object; + if (ret < 0) { + if (ret != -ENODEV) { + nv_error(parent, "failed to create 0x%08x, %d\n", + oclass->handle, ret); + } + + if (object) { + ofuncs->dtor(object); + *pobject = NULL; + } + + return ret; + } + + if (ret == 0) { + nv_debug(object, "created\n"); + atomic_set(&object->refcount, 1); + } + + return 0; +} + +static void +nouveau_object_dtor(struct nouveau_object *object) +{ + nv_debug(object, "destroying\n"); + nv_ofuncs(object)->dtor(object); +} + +void +nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref) +{ + if (obj) { + atomic_inc(&obj->refcount); + nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount)); + } + + if (*ref) { + int dead = atomic_dec_and_test(&(*ref)->refcount); + nv_trace(*ref, "dec() == %d\n", 
atomic_read(&(*ref)->refcount)); + if (dead) + nouveau_object_dtor(*ref); + } + + *ref = obj; +} + +int +nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle, + u16 _oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_object *parent = NULL; + struct nouveau_object *engctx = NULL; + struct nouveau_object *object = NULL; + struct nouveau_object *engine; + struct nouveau_oclass *oclass; + struct nouveau_handle *handle; + int ret; + + /* lookup parent object and ensure it *is* a parent */ + parent = nouveau_handle_ref(client, _parent); + if (!parent) { + nv_error(client, "parent 0x%08x not found\n", _parent); + return -ENOENT; + } + + if (!nv_iclass(parent, NV_PARENT_CLASS)) { + nv_error(parent, "cannot have children\n"); + ret = -EINVAL; + goto fail_class; + } + + /* check that parent supports the requested subclass */ + ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass); + if (ret) { + nv_debug(parent, "illegal class 0x%04x\n", _oclass); + goto fail_class; + } + + /* make sure engine init has been completed *before* any objects + * it controls are created - the constructors may depend on + * state calculated at init (ie. default context construction) + */ + if (engine) { + ret = nouveau_object_inc(engine); + if (ret) + goto fail_class; + } + + /* if engine requires it, create a context object to insert + * between the parent and its children (eg. PGRAPH context) + */ + if (engine && nv_engine(engine)->cclass) { + ret = nouveau_object_ctor(parent, engine, + nv_engine(engine)->cclass, + data, size, &engctx); + if (ret) + goto fail_engctx; + } else { + nouveau_object_ref(parent, &engctx); + } + + /* finally, create new object and bind it to its handle */ + ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); + *pobject = object; + if (ret) + goto fail_ctor; + + ret = nouveau_object_inc(object); + if (ret) + goto fail_init; + + ret = nouveau_handle_create(parent, _parent, _handle, object, &handle); + if (ret) + goto fail_handle; + + ret = nouveau_handle_init(handle); + if (ret) + nouveau_handle_destroy(handle); + +fail_handle: + nouveau_object_dec(object, false); +fail_init: + nouveau_object_ref(NULL, &object); +fail_ctor: + nouveau_object_ref(NULL, &engctx); +fail_engctx: + if (engine) + nouveau_object_dec(engine, false); +fail_class: + nouveau_object_ref(NULL, &parent); + return ret; +} + +int +nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) +{ + struct nouveau_object *parent = NULL; + struct nouveau_object *namedb = NULL; + struct nouveau_handle *handle = NULL; + + parent = nouveau_handle_ref(client, _parent); + if (!parent) + return -ENOENT; + + namedb = nv_pclass(parent, NV_NAMEDB_CLASS); + if (namedb) { + handle = nouveau_namedb_get(nv_namedb(namedb), _handle); + if (handle) { + nouveau_namedb_put(handle); + nouveau_handle_fini(handle, false); + nouveau_handle_destroy(handle); + } + } + + nouveau_object_ref(NULL, &parent); + return handle ? 
0 : -EINVAL; +} + +int +nouveau_object_inc(struct nouveau_object *object) +{ + int ref = atomic_add_return(1, &object->usecount); + int ret; + + nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount)); + if (ref != 1) + return 0; + + nv_trace(object, "initialising...\n"); + if (object->parent) { + ret = nouveau_object_inc(object->parent); + if (ret) { + nv_error(object, "parent failed, %d\n", ret); + goto fail_parent; + } + } + + if (object->engine) { + mutex_lock(&nv_subdev(object->engine)->mutex); + ret = nouveau_object_inc(object->engine); + mutex_unlock(&nv_subdev(object->engine)->mutex); + if (ret) { + nv_error(object, "engine failed, %d\n", ret); + goto fail_engine; + } + } + + ret = nv_ofuncs(object)->init(object); + atomic_set(&object->usecount, 1); + if (ret) { + nv_error(object, "init failed, %d\n", ret); + goto fail_self; + } + + nv_debug(object, "initialised\n"); + return 0; + +fail_self: + if (object->engine) { + mutex_lock(&nv_subdev(object->engine)->mutex); + nouveau_object_dec(object->engine, false); + mutex_unlock(&nv_subdev(object->engine)->mutex); + } +fail_engine: + if (object->parent) + nouveau_object_dec(object->parent, false); +fail_parent: + atomic_dec(&object->usecount); + return ret; +} + +static int +nouveau_object_decf(struct nouveau_object *object) +{ + int ret; + + nv_trace(object, "stopping...\n"); + + ret = nv_ofuncs(object)->fini(object, false); + atomic_set(&object->usecount, 0); + if (ret) + nv_warn(object, "failed fini, %d\n", ret); + + if (object->engine) { + mutex_lock(&nv_subdev(object->engine)->mutex); + nouveau_object_dec(object->engine, false); + mutex_unlock(&nv_subdev(object->engine)->mutex); + } + + if (object->parent) + nouveau_object_dec(object->parent, false); + + nv_debug(object, "stopped\n"); + return 0; +} + +static int +nouveau_object_decs(struct nouveau_object *object) +{ + int ret, rret; + + nv_trace(object, "suspending...\n"); + + ret = nv_ofuncs(object)->fini(object, true); + atomic_set(&object->usecount, 0); + if (ret) { + nv_error(object, "failed suspend, %d\n", ret); + return ret; + } + + if (object->engine) { + mutex_lock(&nv_subdev(object->engine)->mutex); + ret = nouveau_object_dec(object->engine, true); + mutex_unlock(&nv_subdev(object->engine)->mutex); + if (ret) { + nv_warn(object, "engine failed suspend, %d\n", ret); + goto fail_engine; + } + } + + if (object->parent) { + ret = nouveau_object_dec(object->parent, true); + if (ret) { + nv_warn(object, "parent failed suspend, %d\n", ret); + goto fail_parent; + } + } + + nv_debug(object, "suspended\n"); + return 0; + +fail_parent: + if (object->engine) { + mutex_lock(&nv_subdev(object->engine)->mutex); + rret = nouveau_object_inc(object->engine); + mutex_unlock(&nv_subdev(object->engine)->mutex); + if (rret) + nv_fatal(object, "engine failed to reinit, %d\n", rret); + } + +fail_engine: + rret = nv_ofuncs(object)->init(object); + if (rret) + nv_fatal(object, "failed to reinit, %d\n", rret); + + return ret; +} + +int +nouveau_object_dec(struct nouveau_object *object, bool suspend) +{ + int ref = atomic_add_return(-1, &object->usecount); + int ret; + + nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount)); + + if (ref == 0) { + if (suspend) + ret = nouveau_object_decs(object); + else + ret = nouveau_object_decf(object); + + if (ret) { + atomic_inc(&object->usecount); + return ret; + } + } + + return 0; +} + +void +nouveau_object_debug(void) +{ +#ifdef NOUVEAU_OBJECT_MAGIC + struct nouveau_object *object; + if (!list_empty(&_objlist)) { + nv_fatal(NULL, 
"*******************************************\n"); + nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n"); + nv_fatal(NULL, "*******************************************\n"); + list_for_each_entry(object, &_objlist, list) { + nv_fatal(object, "%p/%p/%d/%d\n", + object->parent, object->engine, + atomic_read(&object->refcount), + atomic_read(&object->usecount)); + } + } +#endif +} diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c new file mode 100644 index 0000000..a3263ea --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/option.c @@ -0,0 +1,131 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/option.h> +#include <core/debug.h> + +/* compares unterminated string 'str' with zero-terminated string 'cmp' */ +static inline int +strncasecmpz(const char *str, const char *cmp, size_t len) +{ + if (strlen(cmp) != len) + return len; + return strncasecmp(str, cmp, len); +} + +const char * +nouveau_stropt(const char *optstr, const char *opt, int *arglen) +{ + while (optstr && *optstr != '\0') { + int len = strcspn(optstr, ",="); + switch (optstr[len]) { + case '=': + if (!strncasecmpz(optstr, opt, len)) { + optstr += len + 1; + *arglen = strcspn(optstr, ",="); + return *arglen ? 
optstr : NULL; + } + optstr++; + break; + case ',': + optstr++; + break; + default: + break; + } + optstr += len; + } + + return NULL; +} + +bool +nouveau_boolopt(const char *optstr, const char *opt, bool value) +{ + int arglen; + + optstr = nouveau_stropt(optstr, opt, &arglen); + if (optstr) { + if (!strncasecmpz(optstr, "0", arglen) || + !strncasecmpz(optstr, "no", arglen) || + !strncasecmpz(optstr, "off", arglen) || + !strncasecmpz(optstr, "false", arglen)) + value = false; + else + if (!strncasecmpz(optstr, "1", arglen) || + !strncasecmpz(optstr, "yes", arglen) || + !strncasecmpz(optstr, "on", arglen) || + !strncasecmpz(optstr, "true", arglen)) + value = true; + } + + return value; +} + +int +nouveau_dbgopt(const char *optstr, const char *sub) +{ + int mode = 1, level = CPTCFG_NOUVEAU_DEBUG_DEFAULT; + + while (optstr) { + int len = strcspn(optstr, ",="); + switch (optstr[len]) { + case '=': + if (strncasecmpz(optstr, sub, len)) + mode = 0; + optstr++; + break; + default: + if (mode) { + if (!strncasecmpz(optstr, "fatal", len)) + level = NV_DBG_FATAL; + else if (!strncasecmpz(optstr, "error", len)) + level = NV_DBG_ERROR; + else if (!strncasecmpz(optstr, "warn", len)) + level = NV_DBG_WARN; + else if (!strncasecmpz(optstr, "info", len)) + level = NV_DBG_INFO; + else if (!strncasecmpz(optstr, "debug", len)) + level = NV_DBG_DEBUG; + else if (!strncasecmpz(optstr, "trace", len)) + level = NV_DBG_TRACE; + else if (!strncasecmpz(optstr, "paranoia", len)) + level = NV_DBG_PARANOIA; + else if (!strncasecmpz(optstr, "spam", len)) + level = NV_DBG_SPAM; + } + + if (optstr[len] != '\0') { + optstr++; + mode = 1; + break; + } + + return level; + } + optstr += len; + } + + return level; +} diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c new file mode 100644 index 0000000..313380c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/parent.c @@ -0,0 +1,128 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/parent.h> +#include <core/client.h> + +int +nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, + struct nouveau_object **pengine, + struct nouveau_oclass **poclass) +{ + struct nouveau_sclass *sclass; + struct nouveau_engine *engine; + struct nouveau_oclass *oclass; + u64 mask; + + sclass = nv_parent(parent)->sclass; + while (sclass) { + if ((sclass->oclass->handle & 0xffff) == handle) { + *pengine = parent->engine; + *poclass = sclass->oclass; + return 0; + } + + sclass = sclass->sclass; + } + + mask = nv_parent(parent)->engine; + while (mask) { + int i = ffsll(mask) - 1; + + if (nv_iclass(parent, NV_CLIENT_CLASS)) + engine = nv_engine(nv_client(parent)->device); + else + engine = nouveau_engine(parent, i); + + if (engine) { + oclass = engine->sclass; + while (oclass->ofuncs) { + if ((oclass->handle & 0xffff) == handle) { + *pengine = nv_object(engine); + *poclass = oclass; + return 0; + } + oclass++; + } + } + + mask &= ~(1ULL << i); + } + + return -EINVAL; +} + +int +nouveau_parent_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 pclass, + struct nouveau_oclass *sclass, u64 engcls, + int size, void **pobject) +{ + struct nouveau_parent *object; + struct nouveau_sclass *nclass; + int ret; + + ret = nouveau_object_create_(parent, engine, oclass, pclass | + NV_PARENT_CLASS, size, pobject); + object = *pobject; + if (ret) + return ret; + + while (sclass && sclass->ofuncs) { + nclass = kzalloc(sizeof(*nclass), GFP_KERNEL); + if (!nclass) + return -ENOMEM; + + nclass->sclass = object->sclass; + object->sclass = nclass; + nclass->engine = engine ? nv_engine(engine) : NULL; + nclass->oclass = sclass; + sclass++; + } + + object->engine = engcls; + return 0; +} + +void +nouveau_parent_destroy(struct nouveau_parent *parent) +{ + struct nouveau_sclass *sclass; + + while ((sclass = parent->sclass)) { + parent->sclass = sclass->sclass; + kfree(sclass); + } + + nouveau_object_destroy(&parent->base); +} + + +void +_nouveau_parent_dtor(struct nouveau_object *object) +{ + nouveau_parent_destroy(nv_parent(object)); +} diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c new file mode 100644 index 0000000..6161eaf --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/printk.c @@ -0,0 +1,74 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/client.h> +#include <core/subdev.h> +#include <core/printk.h> + +void +nv_printk_(struct nouveau_object *object, const char *pfx, int level, + const char *fmt, ...) +{ + static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' }; + char mfmt[256]; + va_list args; + + if (object && !nv_iclass(object, NV_CLIENT_CLASS)) { + struct nouveau_object *device = object; + struct nouveau_object *subdev = object; + char obuf[64], *ofmt = ""; + + if (object->engine) { + snprintf(obuf, sizeof(obuf), "[0x%08x][%p]", + nv_hclass(object), object); + ofmt = obuf; + subdev = object->engine; + device = object->engine; + } + + if (subdev->parent) + device = subdev->parent; + + if (level > nv_subdev(subdev)->debug) + return; + + snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx, + name[level], nv_subdev(subdev)->name, + nv_device(device)->name, ofmt, fmt); + } else + if (object && nv_iclass(object, NV_CLIENT_CLASS)) { + if (level > nv_client(object)->debug) + return; + + snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx, + name[level], nv_client(object)->name, fmt); + } else { + snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt); + } + + va_start(args, fmt); + vprintk(mfmt, args); + va_end(args); +} diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c new file mode 100644 index 0000000..86a6404 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/ramht.c @@ -0,0 +1,109 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <core/object.h> +#include <core/ramht.h> +#include <core/math.h> + +#include <subdev/bar.h> + +static u32 +nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle) +{ + u32 hash = 0; + + while (handle) { + hash ^= (handle & ((1 << ramht->bits) - 1)); + handle >>= ramht->bits; + } + + hash ^= chid << (ramht->bits - 4); + hash = hash << 3; + return hash; +} + +int +nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid, + u32 handle, u32 context) +{ + struct nouveau_bar *bar = nouveau_bar(ramht); + u32 co, ho; + + co = ho = nouveau_ramht_hash(ramht, chid, handle); + do { + if (!nv_ro32(ramht, co + 4)) { + nv_wo32(ramht, co + 0, handle); + nv_wo32(ramht, co + 4, context); + if (bar) + bar->flush(bar); + return co; + } + + co += 8; + if (co >= nv_gpuobj(ramht)->size) + co = 0; + } while (co != ho); + + return -ENOMEM; +} + +void +nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie) +{ + struct nouveau_bar *bar = nouveau_bar(ramht); + nv_wo32(ramht, cookie + 0, 0x00000000); + nv_wo32(ramht, cookie + 4, 0x00000000); + if (bar) + bar->flush(bar); +} + +static struct nouveau_oclass +nouveau_ramht_oclass = { + .handle = 0x0000abcd, + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = NULL, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, + }, +}; + +int +nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu, + u32 size, u32 align, struct nouveau_ramht **pramht) +{ + struct nouveau_ramht *ramht; + int ret; + + ret = nouveau_gpuobj_create(parent, parent->engine ? + parent->engine : parent, /* <nv50 ramht */ + &nouveau_ramht_oclass, 0, pargpu, size, + align, NVOBJ_FLAG_ZERO_ALLOC, &ramht); + *pramht = ramht; + if (ret) + return ret; + + ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c new file mode 100644 index 0000000..48f0637 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c @@ -0,0 +1,115 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/subdev.h> +#include <core/device.h> +#include <core/option.h> + +void +nouveau_subdev_reset(struct nouveau_object *subdev) +{ + nv_trace(subdev, "resetting...\n"); + nv_ofuncs(subdev)->fini(subdev, false); + nv_debug(subdev, "reset\n"); +} + +int +nouveau_subdev_init(struct nouveau_subdev *subdev) +{ + int ret = nouveau_object_init(&subdev->base); + if (ret) + return ret; + + nouveau_subdev_reset(&subdev->base); + return 0; +} + +int +_nouveau_subdev_init(struct nouveau_object *object) +{ + return nouveau_subdev_init(nv_subdev(object)); +} + +int +nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend) +{ + if (subdev->unit) { + nv_mask(subdev, 0x000200, subdev->unit, 0x00000000); + nv_mask(subdev, 0x000200, subdev->unit, subdev->unit); + } + + return nouveau_object_fini(&subdev->base, suspend); +} + +int +_nouveau_subdev_fini(struct nouveau_object *object, bool suspend) +{ + return nouveau_subdev_fini(nv_subdev(object), suspend); +} + +void +nouveau_subdev_destroy(struct nouveau_subdev *subdev) +{ + int subidx = nv_hclass(subdev) & 0xff; + nv_device(subdev)->subdev[subidx] = NULL; + nouveau_object_destroy(&subdev->base); +} + +void +_nouveau_subdev_dtor(struct nouveau_object *object) +{ + nouveau_subdev_destroy(nv_subdev(object)); +} + +int +nouveau_subdev_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 pclass, + const char *subname, const char *sysname, + int size, void **pobject) +{ + struct nouveau_subdev *subdev; + int ret; + + ret = nouveau_object_create_(parent, engine, oclass, pclass | + NV_SUBDEV_CLASS, size, pobject); + subdev = *pobject; + if (ret) + return ret; + + __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key); + subdev->name = subname; + + if (parent) { + struct nouveau_device *device = nv_device(parent); + int subidx = nv_hclass(subdev) & 0xff; + + subdev->debug = nouveau_dbgopt(device->dbgopt, subname); + subdev->mmio = nv_subdev(device)->mmio; + device->subdev[subidx] = *pobject; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c new file mode 100644 index 0000000..1d9f614 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c @@ -0,0 +1,93 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/engctx.h> +#include <core/class.h> + +#include <engine/bsp.h> + +struct nv84_bsp_priv { + struct nouveau_engine base; +}; + +/******************************************************************************* + * BSP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv84_bsp_sclass[] = { + {}, +}; + +/******************************************************************************* + * BSP context + ******************************************************************************/ + +static struct nouveau_oclass +nv84_bsp_cclass = { + .handle = NV_ENGCTX(BSP, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_engctx_ctor, + .dtor = _nouveau_engctx_dtor, + .init = _nouveau_engctx_init, + .fini = _nouveau_engctx_fini, + .rd32 = _nouveau_engctx_rd32, + .wr32 = _nouveau_engctx_wr32, + }, +}; + +/******************************************************************************* + * BSP engine/subdev functions + ******************************************************************************/ + +static int +nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_bsp_priv *priv; + int ret; + + ret = nouveau_engine_create(parent, engine, oclass, true, + "PBSP", "bsp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x04008000; + nv_engine(priv)->cclass = &nv84_bsp_cclass; + nv_engine(priv)->sclass = nv84_bsp_sclass; + return 0; +} + +struct nouveau_oclass +nv84_bsp_oclass = { + .handle = NV_ENGINE(BSP, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_bsp_ctor, + .dtor = _nouveau_engine_dtor, + .init = _nouveau_engine_init, + .fini = _nouveau_engine_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c new file mode 100644 index 0000000..0a5aa6b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c @@ -0,0 +1,110 @@ +/* + * Copyright 2012 Maarten Lankhorst + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Maarten Lankhorst + */ + +#include <core/falcon.h> + +#include <engine/bsp.h> + +struct nvc0_bsp_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * BSP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_bsp_sclass[] = { + { 0x90b1, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PBSP context + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_bsp_cclass = { + .handle = NV_ENGCTX(BSP, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + }, +}; + +/******************************************************************************* + * PBSP engine/subdev functions + ******************************************************************************/ + +static int +nvc0_bsp_init(struct nouveau_object *object) +{ + struct nvc0_bsp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x084010, 0x0000fff2); + nv_wr32(priv, 0x08401c, 0x0000fff2); + return 0; +} + +static int +nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_bsp_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, + "PBSP", "bsp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00008000; + nv_engine(priv)->cclass = &nvc0_bsp_cclass; + nv_engine(priv)->sclass = nvc0_bsp_sclass; + return 0; +} + +struct nouveau_oclass +nvc0_bsp_oclass = { + .handle = NV_ENGINE(BSP, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_bsp_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nvc0_bsp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c new file mode 100644 index 0000000..d4f23bb --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c @@ -0,0 +1,110 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/falcon.h> + +#include <engine/bsp.h> + +struct nve0_bsp_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * BSP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nve0_bsp_sclass[] = { + { 0x95b1, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PBSP context + ******************************************************************************/ + +static struct nouveau_oclass +nve0_bsp_cclass = { + .handle = NV_ENGCTX(BSP, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + }, +}; + +/******************************************************************************* + * PBSP engine/subdev functions + ******************************************************************************/ + +static int +nve0_bsp_init(struct nouveau_object *object) +{ + struct nve0_bsp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x084010, 0x0000fff2); + nv_wr32(priv, 0x08401c, 0x0000fff2); + return 0; +} + +static int +nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_bsp_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true, + "PBSP", "bsp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00008000; + nv_engine(priv)->cclass = &nve0_bsp_cclass; + nv_engine(priv)->sclass = nve0_bsp_sclass; + return 0; +} + +struct nouveau_oclass +nve0_bsp_oclass = { + .handle = NV_ENGINE(BSP, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_bsp_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nve0_bsp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc new file mode 100644 index 0000000..219850d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc @@ -0,0 +1,872 @@ +/* fuc microcode for copy engine on nva3- chipsets + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +/* To build for nva3:nvc0 + * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h + * + * To build for nvc0- + * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h + */ + +ifdef(`NVA3', +.section #nva3_pcopy_data +, +.section #nvc0_pcopy_data +) + +ctx_object: .b32 0 +ifdef(`NVA3', +ctx_dma: +ctx_dma_query: .b32 0 +ctx_dma_src: .b32 0 +ctx_dma_dst: .b32 0 +,) +.equ #ctx_dma_count 3 +ctx_query_address_high: .b32 0 +ctx_query_address_low: .b32 0 +ctx_query_counter: .b32 0 +ctx_src_address_high: .b32 0 +ctx_src_address_low: .b32 0 +ctx_src_pitch: .b32 0 +ctx_src_tile_mode: .b32 0 +ctx_src_xsize: .b32 0 +ctx_src_ysize: .b32 0 +ctx_src_zsize: .b32 0 +ctx_src_zoff: .b32 0 +ctx_src_xoff: .b32 0 +ctx_src_yoff: .b32 0 +ctx_src_cpp: .b32 0 +ctx_dst_address_high: .b32 0 +ctx_dst_address_low: .b32 0 +ctx_dst_pitch: .b32 0 +ctx_dst_tile_mode: .b32 0 +ctx_dst_xsize: .b32 0 +ctx_dst_ysize: .b32 0 +ctx_dst_zsize: .b32 0 +ctx_dst_zoff: .b32 0 +ctx_dst_xoff: .b32 0 +ctx_dst_yoff: .b32 0 +ctx_dst_cpp: .b32 0 +ctx_format: .b32 0 +ctx_swz_const0: .b32 0 +ctx_swz_const1: .b32 0 +ctx_xcnt: .b32 0 +ctx_ycnt: .b32 0 +.align 256 + +dispatch_table: +// mthd 0x0000, NAME +.b16 0x000 1 +.b32 #ctx_object ~0xffffffff +// mthd 0x0100, NOP +.b16 0x040 1 +.b32 0x00010000 + #cmd_nop ~0xffffffff +// mthd 0x0140, PM_TRIGGER +.b16 0x050 1 +.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff +ifdef(`NVA3', ` +// mthd 0x0180-0x018c, DMA_ +.b16 0x060 #ctx_dma_count +dispatch_dma: +.b32 0x00010000 + #cmd_dma ~0xffffffff +.b32 0x00010000 + #cmd_dma ~0xffffffff +.b32 0x00010000 + #cmd_dma ~0xffffffff +',) +// mthd 0x0200-0x0218, SRC_TILE +.b16 0x80 7 +.b32 #ctx_src_tile_mode ~0x00000fff +.b32 #ctx_src_xsize ~0x0007ffff +.b32 #ctx_src_ysize ~0x00001fff +.b32 #ctx_src_zsize ~0x000007ff +.b32 #ctx_src_zoff ~0x00000fff +.b32 #ctx_src_xoff ~0x0007ffff +.b32 #ctx_src_yoff ~0x00001fff +// mthd 0x0220-0x0238, DST_TILE +.b16 0x88 7 +.b32 #ctx_dst_tile_mode ~0x00000fff +.b32 #ctx_dst_xsize ~0x0007ffff +.b32 #ctx_dst_ysize ~0x00001fff +.b32 #ctx_dst_zsize ~0x000007ff +.b32 #ctx_dst_zoff ~0x00000fff +.b32 #ctx_dst_xoff ~0x0007ffff +.b32 #ctx_dst_yoff ~0x00001fff +// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH +.b16 0xc0 2 +.b32 0x00010000 + #cmd_exec ~0xffffffff +.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff +// mthd 0x030c-0x0340, various stuff +.b16 0xc3 14 +.b32 #ctx_src_address_high ~0x000000ff +.b32 #ctx_src_address_low ~0xffffffff +.b32 #ctx_dst_address_high ~0x000000ff +.b32 #ctx_dst_address_low ~0xffffffff +.b32 #ctx_src_pitch ~0x0007ffff +.b32 #ctx_dst_pitch ~0x0007ffff +.b32 #ctx_xcnt ~0x0000ffff +.b32 #ctx_ycnt ~0x00001fff +.b32 #ctx_format ~0x0333ffff +.b32 #ctx_swz_const0 ~0xffffffff +.b32 #ctx_swz_const1 ~0xffffffff +.b32 #ctx_query_address_high ~0x000000ff +.b32 #ctx_query_address_low ~0xffffffff +.b32 #ctx_query_counter ~0xffffffff +.b16 0x800 0 + +ifdef(`NVA3', +.section #nva3_pcopy_code +, +.section #nvc0_pcopy_code +) + +main: + clear b32 $r0 + mov $sp $r0 + + // 
setup i0 handler and route fifo and ctxswitch to it + mov $r1 #ih + mov $iv0 $r1 + mov $r1 0x400 + movw $r2 0xfff3 + sethi $r2 0 + iowr I[$r1 + 0x300] $r2 + + // enable interrupts + or $r2 0xc + iowr I[$r1] $r2 + bset $flags ie0 + + // enable fifo access and context switching + mov $r1 0x1200 + mov $r2 3 + iowr I[$r1] $r2 + + // sleep forever, waking for interrupts + bset $flags $p0 + spin: + sleep $p0 + bra #spin + +// i0 handler +ih: + iord $r1 I[$r0 + 0x200] + + and $r2 $r1 0x00000008 + bra e #ih_no_chsw + call #chsw + ih_no_chsw: + and $r2 $r1 0x00000004 + bra e #ih_no_cmd + call #dispatch + + ih_no_cmd: + and $r1 $r1 0x0000000c + iowr I[$r0 + 0x100] $r1 + iret + +// $p1 direction (0 = unload, 1 = load) +// $r3 channel +swctx: + mov $r4 0x7700 + mov $xtargets $r4 +ifdef(`NVA3', ` + // target 7 hardcoded to ctx dma object + mov $xdbase $r0 +', ` // NVC0 + // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1 + mov $r4 0x2100 + iord $r4 I[$r4 + 0] + and $r4 1 + shl b32 $r4 4 + add b32 $r4 0x30 + + // channel is in vram + mov $r15 0x61c + shl b32 $r15 6 + mov $r5 0x114 + iowrs I[$r15] $r5 + + // read 16-byte PCOPYn info, containing context pointer, from channel + shl b32 $r5 $r3 4 + add b32 $r5 2 + mov $xdbase $r5 + mov $r5 $sp + // get a chunk of stack space, aligned to 256 byte boundary + sub b32 $r5 0x100 + mov $r6 0xff + not b32 $r6 + and $r5 $r6 + sethi $r5 0x00020000 + xdld $r4 $r5 + xdwait + sethi $r5 0 + + // set context pointer, from within channel VM + mov $r14 0 + iowrs I[$r15] $r14 + ld b32 $r4 D[$r5 + 0] + shr b32 $r4 8 + ld b32 $r6 D[$r5 + 4] + shl b32 $r6 24 + or $r4 $r6 + mov $xdbase $r4 +') + // 256-byte context, at start of data segment + mov b32 $r4 $r0 + sethi $r4 0x60000 + + // swap! + bra $p1 #swctx_load + xdst $r0 $r4 + bra #swctx_done + swctx_load: + xdld $r0 $r4 + swctx_done: + xdwait + ret + +chsw: + // read current channel + mov $r2 0x1400 + iord $r3 I[$r2] + + // if it's active, unload it and return + xbit $r15 $r3 0x1e + bra e #chsw_no_unload + bclr $flags $p1 + call #swctx + bclr $r3 0x1e + iowr I[$r2] $r3 + mov $r4 1 + iowr I[$r2 + 0x200] $r4 + ret + + // read next channel + chsw_no_unload: + iord $r3 I[$r2 + 0x100] + + // is there a channel waiting to be loaded? 
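+ // (descriptive note, following the existing swctx comments: bit 0x1e of
+ //  the incoming channel word marks a pending channel; if set, swctx is
+ //  called with $p1 set for the load direction to fetch the 256-byte
+ //  context, nva3 then restores the DMA objects into the TARGET regs,
+ //  and 2 is written to the +0x200 register to signal the load is done)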
+ xbit $r13 $r3 0x1e + bra e #chsw_finish_load + bset $flags $p1 + call #swctx +ifdef(`NVA3', + // load dma objects back into TARGET regs + mov $r5 #ctx_dma + mov $r6 #ctx_dma_count + chsw_load_ctx_dma: + ld b32 $r7 D[$r5 + $r6 * 4] + add b32 $r8 $r6 0x180 + shl b32 $r8 8 + iowr I[$r8] $r7 + sub b32 $r6 1 + bra nc #chsw_load_ctx_dma +,) + + chsw_finish_load: + mov $r3 2 + iowr I[$r2 + 0x200] $r3 + ret + +dispatch: + // read incoming fifo command + mov $r3 0x1900 + iord $r2 I[$r3 + 0x100] + iord $r3 I[$r3 + 0x000] + and $r4 $r2 0x7ff + // $r2 will be used to store exception data + shl b32 $r2 0x10 + + // lookup method in the dispatch table, ILLEGAL_MTHD if not found + mov $r5 #dispatch_table + clear b32 $r6 + clear b32 $r7 + dispatch_loop: + ld b16 $r6 D[$r5 + 0] + ld b16 $r7 D[$r5 + 2] + add b32 $r5 4 + cmpu b32 $r4 $r6 + bra c #dispatch_illegal_mthd + add b32 $r7 $r6 + cmpu b32 $r4 $r7 + bra c #dispatch_valid_mthd + sub b32 $r7 $r6 + shl b32 $r7 3 + add b32 $r5 $r7 + bra #dispatch_loop + + // ensure no bits set in reserved fields, INVALID_BITFIELD + dispatch_valid_mthd: + sub b32 $r4 $r6 + shl b32 $r4 3 + add b32 $r4 $r5 + ld b32 $r5 D[$r4 + 4] + and $r5 $r3 + cmpu b32 $r5 0 + bra ne #dispatch_invalid_bitfield + + // depending on dispatch flags: execute method, or save data as state + ld b16 $r5 D[$r4 + 0] + ld b16 $r6 D[$r4 + 2] + cmpu b32 $r6 0 + bra ne #dispatch_cmd + st b32 D[$r5] $r3 + bra #dispatch_done + dispatch_cmd: + bclr $flags $p1 + call $r5 + bra $p1 #dispatch_error + bra #dispatch_done + + dispatch_invalid_bitfield: + or $r2 2 + dispatch_illegal_mthd: + or $r2 1 + + // store exception data in SCRATCH0/SCRATCH1, signal hostirq + dispatch_error: + mov $r4 0x1000 + iowr I[$r4 + 0x000] $r2 + iowr I[$r4 + 0x100] $r3 + mov $r2 0x40 + iowr I[$r0] $r2 + hostirq_wait: + iord $r2 I[$r0 + 0x200] + and $r2 0x40 + cmpu b32 $r2 0 + bra ne #hostirq_wait + + dispatch_done: + mov $r2 0x1d00 + mov $r3 1 + iowr I[$r2] $r3 + ret + +// No-operation +// +// Inputs: +// $r1: irqh state +// $r2: hostirq state +// $r3: data +// $r4: dispatch table entry +// Outputs: +// $r1: irqh state +// $p1: set on error +// $r2: hostirq state +// $r3: data +cmd_nop: + ret + +// PM_TRIGGER +// +// Inputs: +// $r1: irqh state +// $r2: hostirq state +// $r3: data +// $r4: dispatch table entry +// Outputs: +// $r1: irqh state +// $p1: set on error +// $r2: hostirq state +// $r3: data +cmd_pm_trigger: + mov $r2 0x2200 + clear b32 $r3 + sethi $r3 0x20000 + iowr I[$r2] $r3 + ret + +ifdef(`NVA3', +// SET_DMA_* method handler +// +// Inputs: +// $r1: irqh state +// $r2: hostirq state +// $r3: data +// $r4: dispatch table entry +// Outputs: +// $r1: irqh state +// $p1: set on error +// $r2: hostirq state +// $r3: data +cmd_dma: + sub b32 $r4 #dispatch_dma + shr b32 $r4 1 + bset $r3 0x1e + st b32 D[$r4 + #ctx_dma] $r3 + add b32 $r4 0x600 + shl b32 $r4 6 + iowr I[$r4] $r3 + ret +,) + +// Calculates the hw swizzle mask and adjusts the surface's xcnt to match +// +cmd_exec_set_format: + // zero out a chunk of the stack to store the swizzle into + add $sp -0x10 + st b32 D[$sp + 0x00] $r0 + st b32 D[$sp + 0x04] $r0 + st b32 D[$sp + 0x08] $r0 + st b32 D[$sp + 0x0c] $r0 + + // extract cpp, src_ncomp and dst_ncomp from FORMAT + ld b32 $r4 D[$r0 + #ctx_format] + extr $r5 $r4 16:17 + add b32 $r5 1 + extr $r6 $r4 20:21 + add b32 $r6 1 + extr $r7 $r4 24:25 + add b32 $r7 1 + + // convert FORMAT swizzle mask to hw swizzle mask + bclr $flags $p2 + clear b32 $r8 + clear b32 $r9 + ncomp_loop: + and $r10 $r4 0xf + shr b32 $r4 4 + clear b32 
$r11 + bpc_loop: + cmpu b8 $r10 4 + bra nc #cmp_c0 + mulu $r12 $r10 $r5 + add b32 $r12 $r11 + bset $flags $p2 + bra #bpc_next + cmp_c0: + bra ne #cmp_c1 + mov $r12 0x10 + add b32 $r12 $r11 + bra #bpc_next + cmp_c1: + cmpu b8 $r10 6 + bra nc #cmp_zero + mov $r12 0x14 + add b32 $r12 $r11 + bra #bpc_next + cmp_zero: + mov $r12 0x80 + bpc_next: + st b8 D[$sp + $r8] $r12 + add b32 $r8 1 + add b32 $r11 1 + cmpu b32 $r11 $r5 + bra c #bpc_loop + add b32 $r9 1 + cmpu b32 $r9 $r7 + bra c #ncomp_loop + + // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang) + mulu $r6 $r5 + st b32 D[$r0 + #ctx_src_cpp] $r6 + ld b32 $r8 D[$r0 + #ctx_xcnt] + mulu $r6 $r8 + bra $p2 #dst_xcnt + clear b32 $r6 + + dst_xcnt: + mulu $r7 $r5 + st b32 D[$r0 + #ctx_dst_cpp] $r7 + mulu $r7 $r8 + + mov $r5 0x810 + shl b32 $r5 6 + iowr I[$r5 + 0x000] $r6 + iowr I[$r5 + 0x100] $r7 + add b32 $r5 0x800 + ld b32 $r6 D[$r0 + #ctx_dst_cpp] + sub b32 $r6 1 + shl b32 $r6 8 + ld b32 $r7 D[$r0 + #ctx_src_cpp] + sub b32 $r7 1 + or $r6 $r7 + iowr I[$r5 + 0x000] $r6 + add b32 $r5 0x100 + ld b32 $r6 D[$sp + 0x00] + iowr I[$r5 + 0x000] $r6 + ld b32 $r6 D[$sp + 0x04] + iowr I[$r5 + 0x100] $r6 + ld b32 $r6 D[$sp + 0x08] + iowr I[$r5 + 0x200] $r6 + ld b32 $r6 D[$sp + 0x0c] + iowr I[$r5 + 0x300] $r6 + add b32 $r5 0x400 + ld b32 $r6 D[$r0 + #ctx_swz_const0] + iowr I[$r5 + 0x000] $r6 + ld b32 $r6 D[$r0 + #ctx_swz_const1] + iowr I[$r5 + 0x100] $r6 + add $sp 0x10 + ret + +// Setup to handle a tiled surface +// +// Calculates a number of parameters the hardware requires in order +// to correctly handle tiling. +// +// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE): +// nTx = round_up(w * cpp, 1 << Tp) >> Tp +// nTy = round_up(h, 1 << Th) >> Th +// Txo = (x * cpp) & ((1 << Tp) - 1) +// Tx = (x * cpp) >> Tp +// Tyo = y & ((1 << Th) - 1) +// Ty = y >> Th +// Tzo = z & ((1 << Td) - 1) +// Tz = z >> Td +// +// off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo +// off += ((Tz * nTy * nTx)) + (Ty * nTx) + Tx) << Td << Th << Tp; +// +// Inputs: +// $r4: hw command (0x104800) +// $r5: ctx offset adjustment for src/dst selection +// $p2: set if dst surface +// +cmd_exec_set_surface_tiled: + // translate TILE_MODE into Tp, Th, Td shift values + ld b32 $r7 D[$r5 + #ctx_src_tile_mode] + extr $r9 $r7 8:11 + extr $r8 $r7 4:7 +ifdef(`NVA3', + add b32 $r8 2 +, + add b32 $r8 3 +) + extr $r7 $r7 0:3 + cmp b32 $r7 0xe + bra ne #xtile64 + mov $r7 4 + bra #xtileok + xtile64: + xbit $r7 $flags $p2 + add b32 $r7 17 + bset $r4 $r7 + mov $r7 6 + xtileok: + + // Op = (x * cpp) & ((1 << Tp) - 1) + // Tx = (x * cpp) >> Tp + ld b32 $r10 D[$r5 + #ctx_src_xoff] + ld b32 $r11 D[$r5 + #ctx_src_cpp] + mulu $r10 $r11 + mov $r11 1 + shl b32 $r11 $r7 + sub b32 $r11 1 + and $r12 $r10 $r11 + shr b32 $r10 $r7 + + // Tyo = y & ((1 << Th) - 1) + // Ty = y >> Th + ld b32 $r13 D[$r5 + #ctx_src_yoff] + mov $r14 1 + shl b32 $r14 $r8 + sub b32 $r14 1 + and $r11 $r13 $r14 + shr b32 $r13 $r8 + + // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo) + add b32 $r14 1 + shl b32 $r15 $r14 12 + sub b32 $r14 $r11 + or $r15 $r14 + xbit $r6 $flags $p2 + add b32 $r6 0x208 + shl b32 $r6 8 + iowr I[$r6 + 0x000] $r15 + + // Op += Tyo << Tp + shl b32 $r11 $r7 + add b32 $r12 $r11 + + // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp) + ld b32 $r15 D[$r5 + #ctx_src_xsize] + ld b32 $r11 D[$r5 + #ctx_src_cpp] + mulu $r15 $r11 + mov $r11 1 + shl b32 $r11 $r7 + sub b32 $r11 1 + add b32 $r15 $r11 + shr b32 $r15 $r7 + push $r15 + + // nTy = (h + ((1 << Th) - 1)) >> Th + ld b32 $r15 D[$r5 + 
#ctx_src_ysize] + mov $r11 1 + shl b32 $r11 $r8 + sub b32 $r11 1 + add b32 $r15 $r11 + shr b32 $r15 $r8 + push $r15 + + // Tys = Tp + Th + // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td + add b32 $r7 $r8 + sub b32 $r8 2 + mov $r11 1 + shl b32 $r11 $r8 + shl b32 $r11 $r9 + + // Tzo = z & ((1 << Td) - 1) + // Tz = z >> Td + // Op += Tzo << Tys + // Ts = Tys + Td + ld b32 $r8 D[$r5 + #ctx_src_zoff] + mov $r14 1 + shl b32 $r14 $r9 + sub b32 $r14 1 + and $r15 $r8 $r14 + shl b32 $r15 $r7 + add b32 $r12 $r15 + add b32 $r7 $r9 + shr b32 $r8 $r9 + + // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts + pop $r15 + pop $r9 + mulu $r13 $r9 + add b32 $r10 $r13 + mulu $r8 $r9 + mulu $r8 $r15 + add b32 $r10 $r8 + shl b32 $r10 $r7 + + // PITCH = (nTx - 1) << Ts + sub b32 $r9 1 + shl b32 $r9 $r7 + iowr I[$r6 + 0x200] $r9 + + // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff + // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16 + ld b32 $r7 D[$r5 + #ctx_src_address_low] + ld b32 $r8 D[$r5 + #ctx_src_address_high] + add b32 $r10 $r12 + add b32 $r7 $r10 + adc b32 $r8 0 + shl b32 $r8 16 + or $r8 $r11 + sub b32 $r6 0x600 + iowr I[$r6 + 0x000] $r7 + add b32 $r6 0x400 + iowr I[$r6 + 0x000] $r8 + ret + +// Setup to handle a linear surface +// +// Nothing to see here.. Sets ADDRESS and PITCH, pretty non-exciting +// +cmd_exec_set_surface_linear: + xbit $r6 $flags $p2 + add b32 $r6 0x202 + shl b32 $r6 8 + ld b32 $r7 D[$r5 + #ctx_src_address_low] + iowr I[$r6 + 0x000] $r7 + add b32 $r6 0x400 + ld b32 $r7 D[$r5 + #ctx_src_address_high] + shl b32 $r7 16 + iowr I[$r6 + 0x000] $r7 + add b32 $r6 0x400 + ld b32 $r7 D[$r5 + #ctx_src_pitch] + iowr I[$r6 + 0x000] $r7 + ret + +// wait for regs to be available for use +cmd_exec_wait: + push $r0 + push $r1 + mov $r0 0x800 + shl b32 $r0 6 + loop: + iord $r1 I[$r0] + and $r1 1 + bra ne #loop + pop $r1 + pop $r0 + ret + +cmd_exec_query: + // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI } + xbit $r4 $r3 13 + bra ne #query_counter + call #cmd_exec_wait + mov $r4 0x80c + shl b32 $r4 6 + ld b32 $r5 D[$r0 + #ctx_query_address_low] + add b32 $r5 4 + iowr I[$r4 + 0x000] $r5 + iowr I[$r4 + 0x100] $r0 + mov $r5 0xc + iowr I[$r4 + 0x200] $r5 + add b32 $r4 0x400 + ld b32 $r5 D[$r0 + #ctx_query_address_high] + shl b32 $r5 16 + iowr I[$r4 + 0x000] $r5 + add b32 $r4 0x500 + mov $r5 0x00000b00 + sethi $r5 0x00010000 + iowr I[$r4 + 0x000] $r5 + mov $r5 0x00004040 + shl b32 $r5 1 + sethi $r5 0x80800000 + iowr I[$r4 + 0x100] $r5 + mov $r5 0x00001110 + sethi $r5 0x13120000 + iowr I[$r4 + 0x200] $r5 + mov $r5 0x00001514 + sethi $r5 0x17160000 + iowr I[$r4 + 0x300] $r5 + mov $r5 0x00002601 + sethi $r5 0x00010000 + mov $r4 0x800 + shl b32 $r4 6 + iowr I[$r4 + 0x000] $r5 + + // write COUNTER + query_counter: + call #cmd_exec_wait + mov $r4 0x80c + shl b32 $r4 6 + ld b32 $r5 D[$r0 + #ctx_query_address_low] + iowr I[$r4 + 0x000] $r5 + iowr I[$r4 + 0x100] $r0 + mov $r5 0x4 + iowr I[$r4 + 0x200] $r5 + add b32 $r4 0x400 + ld b32 $r5 D[$r0 + #ctx_query_address_high] + shl b32 $r5 16 + iowr I[$r4 + 0x000] $r5 + add b32 $r4 0x500 + mov $r5 0x00000300 + iowr I[$r4 + 0x000] $r5 + mov $r5 0x00001110 + sethi $r5 0x13120000 + iowr I[$r4 + 0x100] $r5 + ld b32 $r5 D[$r0 + #ctx_query_counter] + add b32 $r4 0x500 + iowr I[$r4 + 0x000] $r5 + mov $r5 0x00002601 + sethi $r5 0x00010000 + mov $r4 0x800 + shl b32 $r4 6 + iowr I[$r4 + 0x000] $r5 + ret + +// Execute a copy operation +// +// Inputs: +// $r1: irqh state +// $r2: hostirq state +// $r3: data +// 000002000 QUERY_SHORT +// 000001000 QUERY +// 000000100 DST_LINEAR +// 
000000010 SRC_LINEAR +// 000000001 FORMAT +// $r4: dispatch table entry +// Outputs: +// $r1: irqh state +// $p1: set on error +// $r2: hostirq state +// $r3: data +cmd_exec: + call #cmd_exec_wait + + // if format requested, call function to calculate it, otherwise + // fill in cpp/xcnt for both surfaces as if (cpp == 1) + xbit $r15 $r3 0 + bra e #cmd_exec_no_format + call #cmd_exec_set_format + mov $r4 0x200 + bra #cmd_exec_init_src_surface + cmd_exec_no_format: + mov $r6 0x810 + shl b32 $r6 6 + mov $r7 1 + st b32 D[$r0 + #ctx_src_cpp] $r7 + st b32 D[$r0 + #ctx_dst_cpp] $r7 + ld b32 $r7 D[$r0 + #ctx_xcnt] + iowr I[$r6 + 0x000] $r7 + iowr I[$r6 + 0x100] $r7 + clear b32 $r4 + + cmd_exec_init_src_surface: + bclr $flags $p2 + clear b32 $r5 + xbit $r15 $r3 4 + bra e #src_tiled + call #cmd_exec_set_surface_linear + bra #cmd_exec_init_dst_surface + src_tiled: + call #cmd_exec_set_surface_tiled + bset $r4 7 + + cmd_exec_init_dst_surface: + bset $flags $p2 + mov $r5 #ctx_dst_address_high - #ctx_src_address_high + xbit $r15 $r3 8 + bra e #dst_tiled + call #cmd_exec_set_surface_linear + bra #cmd_exec_kick + dst_tiled: + call #cmd_exec_set_surface_tiled + bset $r4 8 + + cmd_exec_kick: + mov $r5 0x800 + shl b32 $r5 6 + ld b32 $r6 D[$r0 + #ctx_ycnt] + iowr I[$r5 + 0x100] $r6 + mov $r6 0x0041 + // SRC_TARGET = 1, DST_TARGET = 2 + sethi $r6 0x44000000 + or $r4 $r6 + iowr I[$r5] $r4 + + // if requested, queue up a QUERY write after the copy has completed + xbit $r15 $r3 12 + bra e #cmd_exec_done + call #cmd_exec_query + + cmd_exec_done: + ret + +// Flush write cache +// +// Inputs: +// $r1: irqh state +// $r2: hostirq state +// $r3: data +// $r4: dispatch table entry +// Outputs: +// $r1: irqh state +// $p1: set on error +// $r2: hostirq state +// $r3: data +cmd_wrcache_flush: + mov $r2 0x2200 + clear b32 $r3 + sethi $r3 0x10000 + iowr I[$r2] $r3 + ret + +.align 0x100 diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h new file mode 100644 index 0000000..c92520f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h @@ -0,0 +1,620 @@ +static u32 nva3_pcopy_data[] = { +/* 0x0000: ctx_object */ + 0x00000000, +/* 0x0004: ctx_dma */ +/* 0x0004: ctx_dma_query */ + 0x00000000, +/* 0x0008: ctx_dma_src */ + 0x00000000, +/* 0x000c: ctx_dma_dst */ + 0x00000000, +/* 0x0010: ctx_query_address_high */ + 0x00000000, +/* 0x0014: ctx_query_address_low */ + 0x00000000, +/* 0x0018: ctx_query_counter */ + 0x00000000, +/* 0x001c: ctx_src_address_high */ + 0x00000000, +/* 0x0020: ctx_src_address_low */ + 0x00000000, +/* 0x0024: ctx_src_pitch */ + 0x00000000, +/* 0x0028: ctx_src_tile_mode */ + 0x00000000, +/* 0x002c: ctx_src_xsize */ + 0x00000000, +/* 0x0030: ctx_src_ysize */ + 0x00000000, +/* 0x0034: ctx_src_zsize */ + 0x00000000, +/* 0x0038: ctx_src_zoff */ + 0x00000000, +/* 0x003c: ctx_src_xoff */ + 0x00000000, +/* 0x0040: ctx_src_yoff */ + 0x00000000, +/* 0x0044: ctx_src_cpp */ + 0x00000000, +/* 0x0048: ctx_dst_address_high */ + 0x00000000, +/* 0x004c: ctx_dst_address_low */ + 0x00000000, +/* 0x0050: ctx_dst_pitch */ + 0x00000000, +/* 0x0054: ctx_dst_tile_mode */ + 0x00000000, +/* 0x0058: ctx_dst_xsize */ + 0x00000000, +/* 0x005c: ctx_dst_ysize */ + 0x00000000, +/* 0x0060: ctx_dst_zsize */ + 0x00000000, +/* 0x0064: ctx_dst_zoff */ + 0x00000000, +/* 0x0068: ctx_dst_xoff */ + 0x00000000, +/* 0x006c: ctx_dst_yoff */ + 0x00000000, +/* 0x0070: ctx_dst_cpp */ + 0x00000000, +/* 0x0074: ctx_format */ + 0x00000000, +/* 0x0078: 
ctx_swz_const0 */ + 0x00000000, +/* 0x007c: ctx_swz_const1 */ + 0x00000000, +/* 0x0080: ctx_xcnt */ + 0x00000000, +/* 0x0084: ctx_ycnt */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0100: dispatch_table */ + 0x00010000, + 0x00000000, + 0x00000000, + 0x00010040, + 0x00010160, + 0x00000000, + 0x00010050, + 0x00010162, + 0x00000000, + 0x00030060, +/* 0x0128: dispatch_dma */ + 0x00010170, + 0x00000000, + 0x00010170, + 0x00000000, + 0x00010170, + 0x00000000, + 0x00070080, + 0x00000028, + 0xfffff000, + 0x0000002c, + 0xfff80000, + 0x00000030, + 0xffffe000, + 0x00000034, + 0xfffff800, + 0x00000038, + 0xfffff000, + 0x0000003c, + 0xfff80000, + 0x00000040, + 0xffffe000, + 0x00070088, + 0x00000054, + 0xfffff000, + 0x00000058, + 0xfff80000, + 0x0000005c, + 0xffffe000, + 0x00000060, + 0xfffff800, + 0x00000064, + 0xfffff000, + 0x00000068, + 0xfff80000, + 0x0000006c, + 0xffffe000, + 0x000200c0, + 0x00010492, + 0x00000000, + 0x0001051b, + 0x00000000, + 0x000e00c3, + 0x0000001c, + 0xffffff00, + 0x00000020, + 0x00000000, + 0x00000048, + 0xffffff00, + 0x0000004c, + 0x00000000, + 0x00000024, + 0xfff80000, + 0x00000050, + 0xfff80000, + 0x00000080, + 0xffff0000, + 0x00000084, + 0xffffe000, + 0x00000074, + 0xfccc0000, + 0x00000078, + 0x00000000, + 0x0000007c, + 0x00000000, + 0x00000010, + 0xffffff00, + 0x00000014, + 0x00000000, + 0x00000018, + 0x00000000, + 0x00000800, +}; + +static u32 nva3_pcopy_code[] = { +/* 0x0000: main */ + 0x04fe04bd, + 0x3517f000, + 0xf10010fe, + 0xf1040017, + 0xf0fff327, + 0x12d00023, + 0x0c25f0c0, + 0xf40012d0, + 0x17f11031, + 0x27f01200, + 0x0012d003, +/* 0x002f: spin */ + 0xf40031f4, + 0x0ef40028, +/* 0x0035: ih */ + 0x8001cffd, + 0xf40812c4, + 0x21f4060b, +/* 0x0041: ih_no_chsw */ + 0x0412c472, + 0xf4060bf4, +/* 0x004a: ih_no_cmd */ + 0x11c4c321, + 0x4001d00c, +/* 0x0052: swctx */ + 0x47f101f8, + 0x4bfe7700, + 0x0007fe00, + 0xf00204b9, + 0x01f40643, + 0x0604fa09, +/* 0x006b: swctx_load */ + 0xfa060ef4, +/* 0x006e: swctx_done */ + 0x03f80504, +/* 0x0072: chsw */ + 0x27f100f8, + 0x23cf1400, + 0x1e3fc800, + 0xf4170bf4, + 0x21f40132, + 0x1e3af052, + 0xf00023d0, + 0x24d00147, +/* 0x0093: chsw_no_unload */ + 0xcf00f880, + 0x3dc84023, + 0x220bf41e, + 0xf40131f4, + 0x57f05221, + 0x0367f004, +/* 0x00a8: chsw_load_ctx_dma */ + 0xa07856bc, + 0xb6018068, + 0x87d00884, + 0x0162b600, +/* 0x00bb: chsw_finish_load */ + 0xf0f018f4, + 0x23d00237, +/* 0x00c3: dispatch */ + 0xf100f880, + 0xcf190037, + 0x33cf4032, + 0xff24e400, + 0x1024b607, + 0x010057f1, + 0x74bd64bd, +/* 0x00dc: dispatch_loop */ + 0x58005658, + 0x50b60157, + 0x0446b804, + 0xbb4d08f4, + 0x47b80076, + 0x0f08f404, + 0xb60276bb, + 0x57bb0374, + 0xdf0ef400, +/* 0x0100: dispatch_valid_mthd */ + 0xb60246bb, + 0x45bb0344, + 0x01459800, + 0xb00453fd, + 0x1bf40054, + 0x00455820, + 0xb0014658, + 0x1bf40064, + 0x00538009, +/* 0x0127: dispatch_cmd */ + 0xf4300ef4, + 0x55f90132, + 0xf40c01f4, +/* 0x0132: dispatch_invalid_bitfield */ + 0x25f0250e, +/* 0x0135: dispatch_illegal_mthd */ + 0x0125f002, +/* 0x0138: dispatch_error */ + 0x100047f1, + 0xd00042d0, + 0x27f04043, + 0x0002d040, +/* 0x0148: hostirq_wait */ + 0xf08002cf, + 0x24b04024, + 0xf71bf400, +/* 0x0154: 
dispatch_done */ + 0x1d0027f1, + 0xd00137f0, + 0x00f80023, +/* 0x0160: cmd_nop */ +/* 0x0162: cmd_pm_trigger */ + 0x27f100f8, + 0x34bd2200, + 0xd00233f0, + 0x00f80023, +/* 0x0170: cmd_dma */ + 0x012842b7, + 0xf00145b6, + 0x43801e39, + 0x0040b701, + 0x0644b606, + 0xf80043d0, +/* 0x0189: cmd_exec_set_format */ + 0xf030f400, + 0xb00001b0, + 0x01b00101, + 0x0301b002, + 0xc71d0498, + 0x50b63045, + 0x3446c701, + 0xc70160b6, + 0x70b63847, + 0x0232f401, + 0x94bd84bd, +/* 0x01b4: ncomp_loop */ + 0xb60f4ac4, + 0xb4bd0445, +/* 0x01bc: bpc_loop */ + 0xf404a430, + 0xa5ff0f18, + 0x00cbbbc0, + 0xf40231f4, +/* 0x01ce: cmp_c0 */ + 0x1bf4220e, + 0x10c7f00c, + 0xf400cbbb, +/* 0x01da: cmp_c1 */ + 0xa430160e, + 0x0c18f406, + 0xbb14c7f0, + 0x0ef400cb, +/* 0x01e9: cmp_zero */ + 0x80c7f107, +/* 0x01ed: bpc_next */ + 0x01c83800, + 0xb60180b6, + 0xb5b801b0, + 0xc308f404, + 0xb80190b6, + 0x08f40497, + 0x0065fdb2, + 0x98110680, + 0x68fd2008, + 0x0502f400, +/* 0x0216: dst_xcnt */ + 0x75fd64bd, + 0x1c078000, + 0xf10078fd, + 0xb6081057, + 0x56d00654, + 0x4057d000, + 0x080050b7, + 0xb61c0698, + 0x64b60162, + 0x11079808, + 0xfd0172b6, + 0x56d00567, + 0x0050b700, + 0x0060b401, + 0xb40056d0, + 0x56d00160, + 0x0260b440, + 0xb48056d0, + 0x56d00360, + 0x0050b7c0, + 0x1e069804, + 0x980056d0, + 0x56d01f06, + 0x1030f440, +/* 0x0276: cmd_exec_set_surface_tiled */ + 0x579800f8, + 0x6879c70a, + 0xb66478c7, + 0x77c70280, + 0x0e76b060, + 0xf0091bf4, + 0x0ef40477, +/* 0x0291: xtile64 */ + 0x027cf00f, + 0xfd1170b6, + 0x77f00947, +/* 0x029d: xtileok */ + 0x0f5a9806, + 0xfd115b98, + 0xb7f000ab, + 0x04b7bb01, + 0xff01b2b6, + 0xa7bbc4ab, + 0x105d9805, + 0xbb01e7f0, + 0xe2b604e8, + 0xb4deff01, + 0xb605d8bb, + 0xef9401e0, + 0x02ebbb0c, + 0xf005fefd, + 0x60b7026c, + 0x64b60208, + 0x006fd008, + 0xbb04b7bb, + 0x5f9800cb, + 0x115b980b, + 0xf000fbfd, + 0xb7bb01b7, + 0x01b2b604, + 0xbb00fbbb, + 0xf0f905f7, + 0xf00c5f98, + 0xb8bb01b7, + 0x01b2b604, + 0xbb00fbbb, + 0xf0f905f8, + 0xb60078bb, + 0xb7f00282, + 0x04b8bb01, + 0x9804b9bb, + 0xe7f00e58, + 0x04e9bb01, + 0xff01e2b6, + 0xf7bbf48e, + 0x00cfbb04, + 0xbb0079bb, + 0xf0fc0589, + 0xd9fd90fc, + 0x00adbb00, + 0xfd0089fd, + 0xa8bb008f, + 0x04a7bb00, + 0xbb0192b6, + 0x69d00497, + 0x08579880, + 0xbb075898, + 0x7abb00ac, + 0x0081b600, + 0xfd1084b6, + 0x62b7058b, + 0x67d00600, + 0x0060b700, + 0x0068d004, +/* 0x0382: cmd_exec_set_surface_linear */ + 0x6cf000f8, + 0x0260b702, + 0x0864b602, + 0xd0085798, + 0x60b70067, + 0x57980400, + 0x1074b607, + 0xb70067d0, + 0x98040060, + 0x67d00957, +/* 0x03ab: cmd_exec_wait */ + 0xf900f800, + 0xf110f900, + 0xb6080007, +/* 0x03b6: loop */ + 0x01cf0604, + 0x0114f000, + 0xfcfa1bf4, + 0xf800fc10, +/* 0x03c5: cmd_exec_query */ + 0x0d34c800, + 0xf5701bf4, + 0xf103ab21, + 0xb6080c47, + 0x05980644, + 0x0450b605, + 0xd00045d0, + 0x57f04040, + 0x8045d00c, + 0x040040b7, + 0xb6040598, + 0x45d01054, + 0x0040b700, + 0x0057f105, + 0x0153f00b, + 0xf10045d0, + 0xb6404057, + 0x53f10154, + 0x45d08080, + 0x1057f140, + 0x1253f111, + 0x8045d013, + 0x151457f1, + 0x171653f1, + 0xf1c045d0, + 0xf0260157, + 0x47f10153, + 0x44b60800, + 0x0045d006, +/* 0x0438: query_counter */ + 0x03ab21f5, + 0x080c47f1, + 0x980644b6, + 0x45d00505, + 0x4040d000, + 0xd00457f0, + 0x40b78045, + 0x05980400, + 0x1054b604, + 0xb70045d0, + 0xf1050040, + 0xd0030057, + 0x57f10045, + 0x53f11110, + 0x45d01312, + 0x06059840, + 0x050040b7, + 0xf10045d0, + 0xf0260157, + 0x47f10153, + 0x44b60800, + 0x0045d006, +/* 0x0492: cmd_exec */ + 0x21f500f8, + 0x3fc803ab, + 0x0e0bf400, + 0x018921f5, + 0x020047f1, +/* 0x04a7: 
cmd_exec_no_format */ + 0xf11e0ef4, + 0xb6081067, + 0x77f00664, + 0x11078001, + 0x981c0780, + 0x67d02007, + 0x4067d000, +/* 0x04c2: cmd_exec_init_src_surface */ + 0x32f444bd, + 0xc854bd02, + 0x0bf4043f, + 0x8221f50a, + 0x0a0ef403, +/* 0x04d4: src_tiled */ + 0x027621f5, +/* 0x04db: cmd_exec_init_dst_surface */ + 0xf40749f0, + 0x57f00231, + 0x083fc82c, + 0xf50a0bf4, + 0xf4038221, +/* 0x04ee: dst_tiled */ + 0x21f50a0e, + 0x49f00276, +/* 0x04f5: cmd_exec_kick */ + 0x0057f108, + 0x0654b608, + 0xd0210698, + 0x67f04056, + 0x0063f141, + 0x0546fd44, + 0xc80054d0, + 0x0bf40c3f, + 0xc521f507, +/* 0x0519: cmd_exec_done */ +/* 0x051b: cmd_wrcache_flush */ + 0xf100f803, + 0xbd220027, + 0x0133f034, + 0xf80023d0, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h new file mode 100644 index 0000000..0d98c6c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h @@ -0,0 +1,606 @@ +static u32 nvc0_pcopy_data[] = { +/* 0x0000: ctx_object */ + 0x00000000, +/* 0x0004: ctx_query_address_high */ + 0x00000000, +/* 0x0008: ctx_query_address_low */ + 0x00000000, +/* 0x000c: ctx_query_counter */ + 0x00000000, +/* 0x0010: ctx_src_address_high */ + 0x00000000, +/* 0x0014: ctx_src_address_low */ + 0x00000000, +/* 0x0018: ctx_src_pitch */ + 0x00000000, +/* 0x001c: ctx_src_tile_mode */ + 0x00000000, +/* 0x0020: ctx_src_xsize */ + 0x00000000, +/* 0x0024: ctx_src_ysize */ + 0x00000000, +/* 0x0028: ctx_src_zsize */ + 0x00000000, +/* 0x002c: ctx_src_zoff */ + 0x00000000, +/* 0x0030: ctx_src_xoff */ + 0x00000000, +/* 0x0034: ctx_src_yoff */ + 0x00000000, +/* 0x0038: ctx_src_cpp */ + 0x00000000, +/* 0x003c: ctx_dst_address_high */ + 0x00000000, +/* 0x0040: ctx_dst_address_low */ + 0x00000000, +/* 0x0044: ctx_dst_pitch */ + 0x00000000, +/* 0x0048: ctx_dst_tile_mode */ + 0x00000000, +/* 0x004c: ctx_dst_xsize */ + 0x00000000, +/* 0x0050: ctx_dst_ysize */ + 0x00000000, +/* 0x0054: ctx_dst_zsize */ + 0x00000000, +/* 0x0058: ctx_dst_zoff */ + 0x00000000, +/* 0x005c: ctx_dst_xoff */ + 0x00000000, +/* 0x0060: ctx_dst_yoff */ + 0x00000000, +/* 0x0064: ctx_dst_cpp */ + 0x00000000, +/* 0x0068: ctx_format */ + 0x00000000, +/* 0x006c: ctx_swz_const0 */ + 0x00000000, +/* 0x0070: ctx_swz_const1 */ + 0x00000000, +/* 0x0074: ctx_xcnt */ + 0x00000000, +/* 0x0078: ctx_ycnt */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, +/* 0x0100: dispatch_table */ + 0x00010000, + 0x00000000, + 0x00000000, + 0x00010040, + 0x0001019f, + 0x00000000, + 0x00010050, + 0x000101a1, + 0x00000000, + 0x00070080, + 0x0000001c, + 0xfffff000, + 0x00000020, + 0xfff80000, + 0x00000024, + 0xffffe000, + 0x00000028, + 0xfffff800, + 0x0000002c, + 0xfffff000, + 0x00000030, + 0xfff80000, + 0x00000034, + 0xffffe000, + 0x00070088, + 0x00000048, + 0xfffff000, + 0x0000004c, + 0xfff80000, + 0x00000050, + 0xffffe000, + 0x00000054, + 0xfffff800, + 0x00000058, + 0xfffff000, + 0x0000005c, + 0xfff80000, + 0x00000060, + 0xffffe000, + 0x000200c0, + 0x000104b8, + 0x00000000, + 0x00010541, + 0x00000000, + 0x000e00c3, + 0x00000010, + 0xffffff00, + 0x00000014, + 0x00000000, + 0x0000003c, + 0xffffff00, + 0x00000040, + 0x00000000, + 0x00000018, + 0xfff80000, + 0x00000044, + 0xfff80000, + 0x00000074, + 0xffff0000, + 0x00000078, + 0xffffe000, + 0x00000068, + 0xfccc0000, + 0x0000006c, + 0x00000000, + 0x00000070, + 0x00000000, + 0x00000004, + 0xffffff00, + 0x00000008, + 0x00000000, + 0x0000000c, + 0x00000000, + 0x00000800, +}; + +static u32 nvc0_pcopy_code[] = { +/* 0x0000: main */ + 0x04fe04bd, + 0x3517f000, + 0xf10010fe, + 0xf1040017, + 0xf0fff327, + 0x12d00023, + 0x0c25f0c0, + 0xf40012d0, + 0x17f11031, + 0x27f01200, + 0x0012d003, +/* 0x002f: spin */ + 0xf40031f4, + 0x0ef40028, +/* 0x0035: ih */ + 0x8001cffd, + 0xf40812c4, + 0x21f4060b, +/* 0x0041: ih_no_chsw */ + 0x0412c4ca, + 0xf5070bf4, +/* 0x004b: ih_no_cmd */ + 0xc4010221, + 0x01d00c11, +/* 0x0053: swctx */ + 0xf101f840, + 0xfe770047, + 0x47f1004b, + 0x44cf2100, + 0x0144f000, + 0xb60444b6, + 0xf7f13040, + 0xf4b6061c, + 0x1457f106, + 0x00f5d101, + 0xb6043594, + 0x57fe0250, + 0x0145fe00, + 0x010052b7, + 0x00ff67f1, + 0x56fd60bd, + 0x0253f004, + 0xf80545fa, + 0x0053f003, + 0xd100e7f0, + 0x549800fe, + 0x0845b600, + 0xb6015698, + 0x46fd1864, + 0x0047fe05, + 0xf00204b9, + 0x01f40643, + 0x0604fa09, +/* 0x00c3: swctx_load */ + 0xfa060ef4, +/* 0x00c6: swctx_done */ + 0x03f80504, +/* 0x00ca: chsw */ + 0x27f100f8, + 0x23cf1400, + 0x1e3fc800, + 0xf4170bf4, + 0x21f40132, + 0x1e3af053, + 0xf00023d0, + 0x24d00147, +/* 0x00eb: chsw_no_unload */ + 0xcf00f880, + 0x3dc84023, + 0x090bf41e, + 0xf40131f4, +/* 0x00fa: chsw_finish_load */ + 0x37f05321, + 0x8023d002, +/* 0x0102: dispatch */ + 0x37f100f8, + 0x32cf1900, + 0x0033cf40, + 0x07ff24e4, + 0xf11024b6, + 0xbd010057, +/* 0x011b: dispatch_loop */ + 0x5874bd64, + 0x57580056, + 0x0450b601, + 0xf40446b8, + 0x76bb4d08, + 0x0447b800, + 0xbb0f08f4, + 0x74b60276, + 0x0057bb03, +/* 0x013f: dispatch_valid_mthd */ + 0xbbdf0ef4, + 0x44b60246, + 0x0045bb03, + 0xfd014598, + 0x54b00453, + 0x201bf400, + 0x58004558, + 0x64b00146, + 0x091bf400, + 0xf4005380, +/* 0x0166: dispatch_cmd */ + 0x32f4300e, + 0xf455f901, + 0x0ef40c01, +/* 0x0171: dispatch_invalid_bitfield */ + 0x0225f025, +/* 0x0174: dispatch_illegal_mthd */ +/* 0x0177: dispatch_error */ + 0xf10125f0, + 0xd0100047, + 0x43d00042, + 0x4027f040, +/* 0x0187: hostirq_wait */ + 0xcf0002d0, + 0x24f08002, + 0x0024b040, +/* 0x0193: dispatch_done */ + 0xf1f71bf4, + 0xf01d0027, + 0x23d00137, +/* 0x019f: cmd_nop */ + 0xf800f800, +/* 0x01a1: cmd_pm_trigger */ + 0x0027f100, + 0xf034bd22, + 0x23d00233, +/* 0x01af: cmd_exec_set_format */ + 0xf400f800, + 0x01b0f030, + 0x0101b000, + 0xb00201b0, + 0x04980301, + 0x3045c71a, + 0xc70150b6, + 0x60b63446, + 0x3847c701, + 0xf40170b6, + 0x84bd0232, +/* 0x01da: ncomp_loop */ + 0x4ac494bd, + 0x0445b60f, +/* 0x01e2: bpc_loop */ + 0xa430b4bd, + 0x0f18f404, + 0xbbc0a5ff, + 
0x31f400cb, + 0x220ef402, +/* 0x01f4: cmp_c0 */ + 0xf00c1bf4, + 0xcbbb10c7, + 0x160ef400, +/* 0x0200: cmp_c1 */ + 0xf406a430, + 0xc7f00c18, + 0x00cbbb14, +/* 0x020f: cmp_zero */ + 0xf1070ef4, +/* 0x0213: bpc_next */ + 0x380080c7, + 0x80b601c8, + 0x01b0b601, + 0xf404b5b8, + 0x90b6c308, + 0x0497b801, + 0xfdb208f4, + 0x06800065, + 0x1d08980e, + 0xf40068fd, + 0x64bd0502, +/* 0x023c: dst_xcnt */ + 0x800075fd, + 0x78fd1907, + 0x1057f100, + 0x0654b608, + 0xd00056d0, + 0x50b74057, + 0x06980800, + 0x0162b619, + 0x980864b6, + 0x72b60e07, + 0x0567fd01, + 0xb70056d0, + 0xb4010050, + 0x56d00060, + 0x0160b400, + 0xb44056d0, + 0x56d00260, + 0x0360b480, + 0xb7c056d0, + 0x98040050, + 0x56d01b06, + 0x1c069800, + 0xf44056d0, + 0x00f81030, +/* 0x029c: cmd_exec_set_surface_tiled */ + 0xc7075798, + 0x78c76879, + 0x0380b664, + 0xb06077c7, + 0x1bf40e76, + 0x0477f009, +/* 0x02b7: xtile64 */ + 0xf00f0ef4, + 0x70b6027c, + 0x0947fd11, +/* 0x02c3: xtileok */ + 0x980677f0, + 0x5b980c5a, + 0x00abfd0e, + 0xbb01b7f0, + 0xb2b604b7, + 0xc4abff01, + 0x9805a7bb, + 0xe7f00d5d, + 0x04e8bb01, + 0xff01e2b6, + 0xd8bbb4de, + 0x01e0b605, + 0xbb0cef94, + 0xfefd02eb, + 0x026cf005, + 0x020860b7, + 0xd00864b6, + 0xb7bb006f, + 0x00cbbb04, + 0x98085f98, + 0xfbfd0e5b, + 0x01b7f000, + 0xb604b7bb, + 0xfbbb01b2, + 0x05f7bb00, + 0x5f98f0f9, + 0x01b7f009, + 0xb604b8bb, + 0xfbbb01b2, + 0x05f8bb00, + 0x78bbf0f9, + 0x0282b600, + 0xbb01b7f0, + 0xb9bb04b8, + 0x0b589804, + 0xbb01e7f0, + 0xe2b604e9, + 0xf48eff01, + 0xbb04f7bb, + 0x79bb00cf, + 0x0589bb00, + 0x90fcf0fc, + 0xbb00d9fd, + 0x89fd00ad, + 0x008ffd00, + 0xbb00a8bb, + 0x92b604a7, + 0x0497bb01, + 0x988069d0, + 0x58980557, + 0x00acbb04, + 0xb6007abb, + 0x84b60081, + 0x058bfd10, + 0x060062b7, + 0xb70067d0, + 0xd0040060, + 0x00f80068, +/* 0x03a8: cmd_exec_set_surface_linear */ + 0xb7026cf0, + 0xb6020260, + 0x57980864, + 0x0067d005, + 0x040060b7, + 0xb6045798, + 0x67d01074, + 0x0060b700, + 0x06579804, + 0xf80067d0, +/* 0x03d1: cmd_exec_wait */ + 0xf900f900, + 0x0007f110, + 0x0604b608, +/* 0x03dc: loop */ + 0xf00001cf, + 0x1bf40114, + 0xfc10fcfa, +/* 0x03eb: cmd_exec_query */ + 0xc800f800, + 0x1bf40d34, + 0xd121f570, + 0x0c47f103, + 0x0644b608, + 0xb6020598, + 0x45d00450, + 0x4040d000, + 0xd00c57f0, + 0x40b78045, + 0x05980400, + 0x1054b601, + 0xb70045d0, + 0xf1050040, + 0xf00b0057, + 0x45d00153, + 0x4057f100, + 0x0154b640, + 0x808053f1, + 0xf14045d0, + 0xf1111057, + 0xd0131253, + 0x57f18045, + 0x53f11514, + 0x45d01716, + 0x0157f1c0, + 0x0153f026, + 0x080047f1, + 0xd00644b6, +/* 0x045e: query_counter */ + 0x21f50045, + 0x47f103d1, + 0x44b6080c, + 0x02059806, + 0xd00045d0, + 0x57f04040, + 0x8045d004, + 0x040040b7, + 0xb6010598, + 0x45d01054, + 0x0040b700, + 0x0057f105, + 0x0045d003, + 0x111057f1, + 0x131253f1, + 0x984045d0, + 0x40b70305, + 0x45d00500, + 0x0157f100, + 0x0153f026, + 0x080047f1, + 0xd00644b6, + 0x00f80045, +/* 0x04b8: cmd_exec */ + 0x03d121f5, + 0xf4003fc8, + 0x21f50e0b, + 0x47f101af, + 0x0ef40200, +/* 0x04cd: cmd_exec_no_format */ + 0x1067f11e, + 0x0664b608, + 0x800177f0, + 0x07800e07, + 0x1d079819, + 0xd00067d0, + 0x44bd4067, +/* 0x04e8: cmd_exec_init_src_surface */ + 0xbd0232f4, + 0x043fc854, + 0xf50a0bf4, + 0xf403a821, +/* 0x04fa: src_tiled */ + 0x21f50a0e, + 0x49f0029c, +/* 0x0501: cmd_exec_init_dst_surface */ + 0x0231f407, + 0xc82c57f0, + 0x0bf4083f, + 0xa821f50a, + 0x0a0ef403, +/* 0x0514: dst_tiled */ + 0x029c21f5, +/* 0x051b: cmd_exec_kick */ + 0xf10849f0, + 0xb6080057, + 0x06980654, + 0x4056d01e, + 0xf14167f0, + 0xfd440063, + 0x54d00546, + 0x0c3fc800, + 0xf5070bf4, +/* 0x053f: 
cmd_exec_done */ + 0xf803eb21, +/* 0x0541: cmd_wrcache_flush */ + 0x0027f100, + 0xf034bd22, + 0x23d00133, + 0x0000f800, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c new file mode 100644 index 0000000..d6dc2a6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c @@ -0,0 +1,164 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/falcon.h> +#include <core/class.h> +#include <core/enum.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> + +#include <engine/fifo.h> +#include <engine/copy.h> + +#include "fuc/nva3.fuc.h" + +struct nva3_copy_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * Copy object classes + ******************************************************************************/ + +static struct nouveau_oclass +nva3_copy_sclass[] = { + { 0x85b5, &nouveau_object_ofuncs }, + {} +}; + +/******************************************************************************* + * PCOPY context + ******************************************************************************/ + +static struct nouveau_oclass +nva3_copy_cclass = { + .handle = NV_ENGCTX(COPY0, 0xa3), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + + }, +}; + +/******************************************************************************* + * PCOPY engine/subdev functions + ******************************************************************************/ + +static const struct nouveau_enum nva3_copy_isr_error_name[] = { + { 0x0001, "ILLEGAL_MTHD" }, + { 0x0002, "INVALID_ENUM" }, + { 0x0003, "INVALID_BITFIELD" }, + {} +}; + +void +nva3_copy_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_falcon *falcon = (void *)subdev; + struct nouveau_object *engctx; + u32 dispatch = nv_ro32(falcon, 0x01c); + u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); + u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff; + u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff; + u32 addr = nv_ro32(falcon, 0x040) >> 16; + u32 mthd = (addr & 0x07ff) << 2; + u32 subc = (addr & 0x3800) >> 11; + u32 data = nv_ro32(falcon, 0x044); + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & 0x00000040) { + nv_error(falcon, "DISPATCH_ERROR ["); + nouveau_enum_print(nva3_copy_isr_error_name, ssta); + pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n", + chid, inst << 12, nouveau_client_name(engctx), subc, + mthd, data); + nv_wo32(falcon, 0x004, 0x00000040); + stat &= ~0x00000040; + } + + if (stat) { + nv_error(falcon, "unhandled intr 0x%08x\n", stat); + nv_wo32(falcon, 0x004, stat); + } + + nouveau_engctx_put(engctx); +} + +static int +nva3_copy_tlb_flush(struct nouveau_engine *engine) +{ + nv50_vm_flush_engine(&engine->base, 0x0d); + return 0; +} + +static int +nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + bool enable = (nv_device(parent)->chipset != 0xaf); + struct nva3_copy_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable, + "PCE0", "copy0", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00802000; + nv_subdev(priv)->intr = nva3_copy_intr; + nv_engine(priv)->cclass = &nva3_copy_cclass; + nv_engine(priv)->sclass = nva3_copy_sclass; + nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush; + nv_falcon(priv)->code.data = 
nva3_pcopy_code; + nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code); + nv_falcon(priv)->data.data = nva3_pcopy_data; + nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data); + return 0; +} + +struct nouveau_oclass +nva3_copy_oclass = { + .handle = NV_ENGINE(COPY0, 0xa3), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nva3_copy_ctor, + .dtor = _nouveau_falcon_dtor, + .init = _nouveau_falcon_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c new file mode 100644 index 0000000..b3ed273 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c @@ -0,0 +1,178 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/falcon.h> +#include <core/class.h> +#include <core/enum.h> + +#include <engine/fifo.h> +#include <engine/copy.h> + +#include "fuc/nvc0.fuc.h" + +struct nvc0_copy_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * Copy object classes + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_copy0_sclass[] = { + { 0x90b5, &nouveau_object_ofuncs }, + {}, +}; + +static struct nouveau_oclass +nvc0_copy1_sclass[] = { + { 0x90b8, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PCOPY context + ******************************************************************************/ + +static struct nouveau_ofuncs +nvc0_copy_context_ofuncs = { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, +}; + +static struct nouveau_oclass +nvc0_copy0_cclass = { + .handle = NV_ENGCTX(COPY0, 0xc0), + .ofuncs = &nvc0_copy_context_ofuncs, +}; + +static struct nouveau_oclass +nvc0_copy1_cclass = { + .handle = NV_ENGCTX(COPY1, 0xc0), + .ofuncs = &nvc0_copy_context_ofuncs, +}; + +/******************************************************************************* + * PCOPY engine/subdev functions + ******************************************************************************/ + +static int +nvc0_copy_init(struct nouveau_object *object) +{ + struct nvc0_copy_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0); + return 0; +} + +static int +nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_copy_priv *priv; + int ret; + + if (nv_rd32(parent, 0x022500) & 0x00000100) + return -ENODEV; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true, + "PCE0", "copy0", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000040; + nv_subdev(priv)->intr = nva3_copy_intr; + nv_engine(priv)->cclass = &nvc0_copy0_cclass; + nv_engine(priv)->sclass = nvc0_copy0_sclass; + nv_falcon(priv)->code.data = nvc0_pcopy_code; + nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code); + nv_falcon(priv)->data.data = nvc0_pcopy_data; + nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data); + return 0; +} + +static int +nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_copy_priv *priv; + int ret; + + if (nv_rd32(parent, 0x022500) & 0x00000200) + return -ENODEV; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true, + "PCE1", "copy1", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000080; + nv_subdev(priv)->intr = nva3_copy_intr; + nv_engine(priv)->cclass = &nvc0_copy1_cclass; + nv_engine(priv)->sclass = nvc0_copy1_sclass; + nv_falcon(priv)->code.data = nvc0_pcopy_code; + nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code); + nv_falcon(priv)->data.data = nvc0_pcopy_data; + nv_falcon(priv)->data.size = 
sizeof(nvc0_pcopy_data); + return 0; +} + +struct nouveau_oclass +nvc0_copy0_oclass = { + .handle = NV_ENGINE(COPY0, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_copy0_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nvc0_copy_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; + +struct nouveau_oclass +nvc0_copy1_oclass = { + .handle = NV_ENGINE(COPY1, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_copy1_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nvc0_copy_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c new file mode 100644 index 0000000..dbbe9e8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c @@ -0,0 +1,136 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/enum.h> +#include <core/class.h> +#include <core/engctx.h> + +#include <engine/copy.h> + +struct nve0_copy_priv { + struct nouveau_engine base; +}; + +/******************************************************************************* + * Copy object classes + ******************************************************************************/ + +static struct nouveau_oclass +nve0_copy_sclass[] = { + { 0xa0b5, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PCOPY context + ******************************************************************************/ + +static struct nouveau_ofuncs +nve0_copy_context_ofuncs = { + .ctor = _nouveau_engctx_ctor, + .dtor = _nouveau_engctx_dtor, + .init = _nouveau_engctx_init, + .fini = _nouveau_engctx_fini, + .rd32 = _nouveau_engctx_rd32, + .wr32 = _nouveau_engctx_wr32, +}; + +static struct nouveau_oclass +nve0_copy_cclass = { + .handle = NV_ENGCTX(COPY0, 0xc0), + .ofuncs = &nve0_copy_context_ofuncs, +}; + +/******************************************************************************* + * PCOPY engine/subdev functions + ******************************************************************************/ + +static int +nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_copy_priv *priv; + int ret; + + if (nv_rd32(parent, 0x022500) & 0x00000100) + return -ENODEV; + + ret = nouveau_engine_create(parent, engine, oclass, true, + "PCE0", "copy0", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000040; + nv_engine(priv)->cclass = &nve0_copy_cclass; + nv_engine(priv)->sclass = nve0_copy_sclass; + return 0; +} + +static int +nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_copy_priv *priv; + int ret; + + if (nv_rd32(parent, 0x022500) & 0x00000200) + return -ENODEV; + + ret = nouveau_engine_create(parent, engine, oclass, true, + "PCE1", "copy1", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000080; + nv_engine(priv)->cclass = &nve0_copy_cclass; + nv_engine(priv)->sclass = nve0_copy_sclass; + return 0; +} + +struct nouveau_oclass +nve0_copy0_oclass = { + .handle = NV_ENGINE(COPY0, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_copy0_ctor, + .dtor = _nouveau_engine_dtor, + .init = _nouveau_engine_init, + .fini = _nouveau_engine_fini, + }, +}; + +struct nouveau_oclass +nve0_copy1_oclass = { + .handle = NV_ENGINE(COPY1, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_copy1_ctor, + .dtor = _nouveau_engine_dtor, + .init = _nouveau_engine_init, + .fini = _nouveau_engine_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc new file mode 100644 index 0000000..629da02 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc @@ -0,0 +1,698 @@ +/* + * fuc microcode for nv98 pcrypt engine + * Copyright (C) 2010 Marcin Kościelnicki + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your 
option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +.section #nv98_pcrypt_data + +ctx_dma: +ctx_dma_query: .b32 0 +ctx_dma_src: .b32 0 +ctx_dma_dst: .b32 0 +.equ #dma_count 3 +ctx_query_address_high: .b32 0 +ctx_query_address_low: .b32 0 +ctx_query_counter: .b32 0 +ctx_cond_address_high: .b32 0 +ctx_cond_address_low: .b32 0 +ctx_cond_off: .b32 0 +ctx_src_address_high: .b32 0 +ctx_src_address_low: .b32 0 +ctx_dst_address_high: .b32 0 +ctx_dst_address_low: .b32 0 +ctx_mode: .b32 0 +.align 16 +ctx_key: .skip 16 +ctx_iv: .skip 16 + +.align 0x80 +swap: +.skip 32 + +.align 8 +common_cmd_dtable: +.b32 #ctx_query_address_high + 0x20000 ~0xff +.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0 +.b32 #ctx_query_counter + 0x20000 ~0xffffffff +.b32 #cmd_query_get + 0x00000 ~1 +.b32 #ctx_cond_address_high + 0x20000 ~0xff +.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0 +.b32 #cmd_cond_mode + 0x00000 ~7 +.b32 #cmd_wrcache_flush + 0x00000 ~0 +.equ #common_cmd_max 0x88 + + +.align 8 +engine_cmd_dtable: +.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff +.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff +.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff +.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff +.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff +.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff +.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff +.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff +.b32 #ctx_src_address_high + 0x20000 ~0xff +.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0 +.b32 #ctx_dst_address_high + 0x20000 ~0xff +.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0 +.b32 #crypt_cmd_mode + 0x00000 ~0xf +.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0 +.equ #engine_cmd_max 0xce + +.align 4 +crypt_dtable: +.b16 #crypt_copy_prep #crypt_do_inout +.b16 #crypt_store_prep #crypt_do_out +.b16 #crypt_ecb_e_prep #crypt_do_inout +.b16 #crypt_ecb_d_prep #crypt_do_inout +.b16 #crypt_cbc_e_prep #crypt_do_inout +.b16 #crypt_cbc_d_prep #crypt_do_inout +.b16 #crypt_pcbc_e_prep #crypt_do_inout +.b16 #crypt_pcbc_d_prep #crypt_do_inout +.b16 #crypt_cfb_e_prep #crypt_do_inout +.b16 #crypt_cfb_d_prep #crypt_do_inout +.b16 #crypt_ofb_prep #crypt_do_inout +.b16 #crypt_ctr_prep #crypt_do_inout +.b16 #crypt_cbc_mac_prep #crypt_do_in +.b16 #crypt_cmac_finish_complete_prep #crypt_do_in +.b16 #crypt_cmac_finish_partial_prep #crypt_do_in + +.align 0x100 + +.section #nv98_pcrypt_code + + // $r0 is always set to 0 in our code - this allows some space savings. + clear b32 $r0 + + // set up the interrupt handler + mov $r1 #ih + mov $iv0 $r1 + + // init stack pointer + mov $sp $r0 + + // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host + movw $r1 0xfff0 + sethi $r1 0 + mov $r2 0x400 + iowr I[$r2 + 0x300] $r1 + + // enable the interrupts + or $r1 0xc + iowr I[$r2] $r1 + + // enable fifo access and context switching + mov $r1 3 + mov $r2 0x1200 + iowr I[$r2] $r1 + + // enable i0 delivery + bset $flags ie0 + + // sleep forver, waking only for interrupts. 
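+	// ($p0 is left set so the sleep below idles the engine; an interrupt wakes
+	// it just long enough to run #ih, after which the branch drops straight
+	// back into the sleep.)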
+ bset $flags $p0 + spin: + sleep $p0 + bra #spin + +// i0 handler +ih: + // see which interrupts we got + iord $r1 I[$r0 + 0x200] + + and $r2 $r1 0x8 + cmpu b32 $r2 0 + bra e #noctx + + // context switch... prepare the regs for xfer + mov $r2 0x7700 + mov $xtargets $r2 + mov $xdbase $r0 + // 128-byte context. + mov $r2 0 + sethi $r2 0x50000 + + // read current channel + mov $r3 0x1400 + iord $r4 I[$r3] + // if bit 30 set, it's active, so we have to unload it first. + shl b32 $r5 $r4 1 + cmps b32 $r5 0 + bra nc #ctxload + + // unload the current channel - save the context + xdst $r0 $r2 + xdwait + // and clear bit 30, then write back + bclr $r4 0x1e + iowr I[$r3] $r4 + // tell PFIFO we unloaded + mov $r4 1 + iowr I[$r3 + 0x200] $r4 + + bra #noctx + + ctxload: + // no channel loaded - perhaps we're requested to load one + iord $r4 I[$r3 + 0x100] + shl b32 $r15 $r4 1 + cmps b32 $r15 0 + // if bit 30 of next channel not set, probably PFIFO is just + // killing a context. do a faux load, without the active bit. + bra nc #dummyload + + // ok, do a real context load. + xdld $r0 $r2 + xdwait + mov $r5 #ctx_dma + mov $r6 #dma_count - 1 + ctxload_dma_loop: + ld b32 $r7 D[$r5 + $r6 * 4] + add b32 $r8 $r6 0x180 + shl b32 $r8 8 + iowr I[$r8] $r7 + sub b32 $r6 1 + bra nc #ctxload_dma_loop + + dummyload: + // tell PFIFO we're done + mov $r5 2 + iowr I[$r3 + 0x200] $r5 + + noctx: + and $r2 $r1 0x4 + cmpu b32 $r2 0 + bra e #nocmd + + // incoming fifo command. + mov $r3 0x1900 + iord $r2 I[$r3 + 0x100] + iord $r3 I[$r3] + // extract the method + and $r4 $r2 0x7ff + // shift the addr to proper position if we need to interrupt later + shl b32 $r2 0x10 + + // mthd 0 and 0x100 [NAME, NOP]: ignore + and $r5 $r4 0x7bf + cmpu b32 $r5 0 + bra e #cmddone + + mov $r5 #engine_cmd_dtable - 0xc0 * 8 + mov $r6 #engine_cmd_max + cmpu b32 $r4 0xc0 + bra nc #dtable_cmd + mov $r5 #common_cmd_dtable - 0x80 * 8 + mov $r6 #common_cmd_max + cmpu b32 $r4 0x80 + bra nc #dtable_cmd + cmpu b32 $r4 0x60 + bra nc #dma_cmd + cmpu b32 $r4 0x50 + bra ne #illegal_mthd + + // mthd 0x140: PM_TRIGGER + mov $r2 0x2200 + clear b32 $r3 + sethi $r3 0x20000 + iowr I[$r2] $r3 + bra #cmddone + + dma_cmd: + // mthd 0x180...: DMA_* + cmpu b32 $r4 0x60+#dma_count + bra nc #illegal_mthd + shl b32 $r5 $r4 2 + add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff) + bset $r3 0x1e + st b32 D[$r5] $r3 + add b32 $r4 0x180 - 0x60 + shl b32 $r4 8 + iowr I[$r4] $r3 + bra #cmddone + + dtable_cmd: + cmpu b32 $r4 $r6 + bra nc #illegal_mthd + shl b32 $r4 3 + add b32 $r4 $r5 + ld b32 $r5 D[$r4 + 4] + and $r5 $r3 + cmpu b32 $r5 0 + bra ne #invalid_bitfield + ld b16 $r5 D[$r4] + ld b16 $r6 D[$r4 + 2] + cmpu b32 $r6 2 + bra e #cmd_setctx + ld b32 $r7 D[$r0 + #ctx_cond_off] + and $r6 $r7 + cmpu b32 $r6 1 + bra e #cmddone + call $r5 + bra $p1 #dispatch_error + bra #cmddone + + cmd_setctx: + st b32 D[$r5] $r3 + bra #cmddone + + + invalid_bitfield: + or $r2 1 + dispatch_error: + illegal_mthd: + mov $r4 0x1000 + iowr I[$r4] $r2 + iowr I[$r4 + 0x100] $r3 + mov $r4 0x40 + iowr I[$r0] $r4 + + im_loop: + iord $r4 I[$r0 + 0x200] + and $r4 0x40 + cmpu b32 $r4 0 + bra ne #im_loop + + cmddone: + // remove the command from FIFO + mov $r3 0x1d00 + mov $r4 1 + iowr I[$r3] $r4 + + nocmd: + // ack the processed interrupts + and $r1 $r1 0xc + iowr I[$r0 + 0x100] $r1 +iret + +cmd_query_get: + // if bit 0 of param set, trigger interrupt afterwards. + setp $p1 $r3 + or $r2 3 + + // read PTIMER, beware of races... 
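+	// (the timestamp is read as two 32-bit halves: fetch one half, then the
+	// other, then re-read the first and retry if it changed in between, so
+	// that both halves belong to the same 64-bit value.)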
+ mov $r4 0xb00 + ptimer_retry: + iord $r6 I[$r4 + 0x100] + iord $r5 I[$r4] + iord $r7 I[$r4 + 0x100] + cmpu b32 $r6 $r7 + bra ne #ptimer_retry + + // prepare the query structure + ld b32 $r4 D[$r0 + #ctx_query_counter] + st b32 D[$r0 + #swap + 0x0] $r4 + st b32 D[$r0 + #swap + 0x4] $r0 + st b32 D[$r0 + #swap + 0x8] $r5 + st b32 D[$r0 + #swap + 0xc] $r6 + + // will use target 0, DMA_QUERY. + mov $xtargets $r0 + + ld b32 $r4 D[$r0 + #ctx_query_address_high] + shl b32 $r4 0x18 + mov $xdbase $r4 + + ld b32 $r4 D[$r0 + #ctx_query_address_low] + mov $r5 #swap + sethi $r5 0x20000 + xdst $r4 $r5 + xdwait + + ret + +cmd_cond_mode: + // if >= 5, INVALID_ENUM + bset $flags $p1 + or $r2 2 + cmpu b32 $r3 5 + bra nc #return + + // otherwise, no error. + bclr $flags $p1 + + // if < 2, no QUERY object is involved + cmpu b32 $r3 2 + bra nc #cmd_cond_mode_queryful + + xor $r3 1 + st b32 D[$r0 + #ctx_cond_off] $r3 + return: + ret + + cmd_cond_mode_queryful: + // ok, will need to pull a QUERY object, prepare offsets + ld b32 $r4 D[$r0 + #ctx_cond_address_high] + ld b32 $r5 D[$r0 + #ctx_cond_address_low] + and $r6 $r5 0xff + shr b32 $r5 8 + shl b32 $r4 0x18 + or $r4 $r5 + mov $xdbase $r4 + mov $xtargets $r0 + + // pull the first one + mov $r5 #swap + sethi $r5 0x20000 + xdld $r6 $r5 + + // if == 2, only a single QUERY is involved... + cmpu b32 $r3 2 + bra ne #cmd_cond_mode_double + + xdwait + ld b32 $r4 D[$r0 + #swap + 4] + cmpu b32 $r4 0 + xbit $r4 $flags z + st b32 D[$r0 + #ctx_cond_off] $r4 + ret + + // ok, we'll need to pull second one too + cmd_cond_mode_double: + add b32 $r6 0x10 + add b32 $r5 0x10 + xdld $r6 $r5 + xdwait + + // compare COUNTERs + ld b32 $r5 D[$r0 + #swap + 0x00] + ld b32 $r6 D[$r0 + #swap + 0x10] + cmpu b32 $r5 $r6 + xbit $r4 $flags z + + // compare RESen + ld b32 $r5 D[$r0 + #swap + 0x04] + ld b32 $r6 D[$r0 + #swap + 0x14] + cmpu b32 $r5 $r6 + xbit $r5 $flags z + and $r4 $r5 + + // and negate or not, depending on mode + cmpu b32 $r3 3 + xbit $r5 $flags z + xor $r4 $r5 + st b32 D[$r0 + #ctx_cond_off] $r4 + ret + +cmd_wrcache_flush: + bclr $flags $p1 + mov $r2 0x2200 + clear b32 $r3 + sethi $r3 0x10000 + iowr I[$r2] $r3 + ret + +crypt_cmd_mode: + // if >= 0xf, INVALID_ENUM + bset $flags $p1 + or $r2 2 + cmpu b32 $r3 0xf + bra nc #crypt_cmd_mode_return + + bclr $flags $p1 + st b32 D[$r0 + #ctx_mode] $r3 + + crypt_cmd_mode_return: + ret + +crypt_cmd_length: + // nop if length == 0 + cmpu b32 $r3 0 + bra e #crypt_cmd_mode_return + + // init key, IV + cxset 3 + mov $r4 #ctx_key + sethi $r4 0x70000 + xdst $r0 $r4 + mov $r4 #ctx_iv + sethi $r4 0x60000 + xdst $r0 $r4 + xdwait + ckeyreg $c7 + + // prepare the targets + mov $r4 0x2100 + mov $xtargets $r4 + + // prepare src address + ld b32 $r4 D[$r0 + #ctx_src_address_high] + ld b32 $r5 D[$r0 + #ctx_src_address_low] + shr b32 $r8 $r5 8 + shl b32 $r4 0x18 + or $r4 $r8 + and $r5 $r5 0xff + + // prepare dst address + ld b32 $r6 D[$r0 + #ctx_dst_address_high] + ld b32 $r7 D[$r0 + #ctx_dst_address_low] + shr b32 $r8 $r7 8 + shl b32 $r6 0x18 + or $r6 $r8 + and $r7 $r7 0xff + + // find the proper prep & do functions + ld b32 $r8 D[$r0 + #ctx_mode] + shl b32 $r8 2 + + // run prep + ld b16 $r9 D[$r8 + #crypt_dtable] + call $r9 + + // do it + ld b16 $r9 D[$r8 + #crypt_dtable + 2] + call $r9 + cxset 1 + xdwait + cxset 0x61 + xdwait + xdwait + + // update src address + shr b32 $r8 $r4 0x18 + shl b32 $r9 $r4 8 + add b32 $r9 $r5 + adc b32 $r8 0 + st b32 D[$r0 + #ctx_src_address_high] $r8 + st b32 D[$r0 + #ctx_src_address_low] $r9 + + // update dst address + 
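+	// ($r6 still holds (high << 24) | (low >> 8) from the setup above and $r7
+	// the advanced low byte offset, so rebuild the 32-bit low word, let the
+	// carry ripple into the high byte, and store both back into the context.)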
shr b32 $r8 $r6 0x18 + shl b32 $r9 $r6 8 + add b32 $r9 $r7 + adc b32 $r8 0 + st b32 D[$r0 + #ctx_dst_address_high] $r8 + st b32 D[$r0 + #ctx_dst_address_low] $r9 + + // pull updated IV + cxset 2 + mov $r4 #ctx_iv + sethi $r4 0x60000 + xdld $r0 $r4 + xdwait + + ret + + +crypt_copy_prep: + cs0begin 2 + cxsin $c0 + cxsout $c0 + ret + +crypt_store_prep: + cs0begin 1 + cxsout $c6 + ret + +crypt_ecb_e_prep: + cs0begin 3 + cxsin $c0 + cenc $c0 $c0 + cxsout $c0 + ret + +crypt_ecb_d_prep: + ckexp $c7 $c7 + cs0begin 3 + cxsin $c0 + cdec $c0 $c0 + cxsout $c0 + ret + +crypt_cbc_e_prep: + cs0begin 4 + cxsin $c0 + cxor $c6 $c0 + cenc $c6 $c6 + cxsout $c6 + ret + +crypt_cbc_d_prep: + ckexp $c7 $c7 + cs0begin 5 + cmov $c2 $c6 + cxsin $c6 + cdec $c0 $c6 + cxor $c0 $c2 + cxsout $c0 + ret + +crypt_pcbc_e_prep: + cs0begin 5 + cxsin $c0 + cxor $c6 $c0 + cenc $c6 $c6 + cxsout $c6 + cxor $c6 $c0 + ret + +crypt_pcbc_d_prep: + ckexp $c7 $c7 + cs0begin 5 + cxsin $c0 + cdec $c1 $c0 + cxor $c6 $c1 + cxsout $c6 + cxor $c6 $c0 + ret + +crypt_cfb_e_prep: + cs0begin 4 + cenc $c6 $c6 + cxsin $c0 + cxor $c6 $c0 + cxsout $c6 + ret + +crypt_cfb_d_prep: + cs0begin 4 + cenc $c0 $c6 + cxsin $c6 + cxor $c0 $c6 + cxsout $c0 + ret + +crypt_ofb_prep: + cs0begin 4 + cenc $c6 $c6 + cxsin $c0 + cxor $c0 $c6 + cxsout $c0 + ret + +crypt_ctr_prep: + cs0begin 5 + cenc $c1 $c6 + cadd $c6 1 + cxsin $c0 + cxor $c0 $c1 + cxsout $c0 + ret + +crypt_cbc_mac_prep: + cs0begin 3 + cxsin $c0 + cxor $c6 $c0 + cenc $c6 $c6 + ret + +crypt_cmac_finish_complete_prep: + cs0begin 7 + cxsin $c0 + cxor $c6 $c0 + cxor $c0 $c0 + cenc $c0 $c0 + cprecmac $c0 $c0 + cxor $c6 $c0 + cenc $c6 $c6 + ret + +crypt_cmac_finish_partial_prep: + cs0begin 8 + cxsin $c0 + cxor $c6 $c0 + cxor $c0 $c0 + cenc $c0 $c0 + cprecmac $c0 $c0 + cprecmac $c0 $c0 + cxor $c6 $c0 + cenc $c6 $c6 + ret + +// TODO +crypt_do_in: + add b32 $r3 $r5 + mov $xdbase $r4 + mov $r9 #swap + sethi $r9 0x20000 + crypt_do_in_loop: + xdld $r5 $r9 + xdwait + cxset 0x22 + xdst $r0 $r9 + cs0exec 1 + xdwait + add b32 $r5 0x10 + cmpu b32 $r5 $r3 + bra ne #crypt_do_in_loop + cxset 1 + xdwait + ret + +crypt_do_out: + add b32 $r3 $r7 + mov $xdbase $r6 + mov $r9 #swap + sethi $r9 0x20000 + crypt_do_out_loop: + cs0exec 1 + cxset 0x61 + xdld $r7 $r9 + xdst $r7 $r9 + cxset 1 + xdwait + add b32 $r7 0x10 + cmpu b32 $r7 $r3 + bra ne #crypt_do_out_loop + ret + +crypt_do_inout: + add b32 $r3 $r5 + mov $r9 #swap + sethi $r9 0x20000 + crypt_do_inout_loop: + mov $xdbase $r4 + xdld $r5 $r9 + xdwait + cxset 0x21 + xdst $r0 $r9 + cs0exec 1 + cxset 0x61 + mov $xdbase $r6 + xdld $r7 $r9 + xdst $r7 $r9 + cxset 1 + xdwait + add b32 $r5 0x10 + add b32 $r7 0x10 + cmpu b32 $r5 $r3 + bra ne #crypt_do_inout_loop + ret + +.align 0x100 diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h new file mode 100644 index 0000000..09962e4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h @@ -0,0 +1,584 @@ +static uint32_t nv98_pcrypt_data[] = { +/* 0x0000: ctx_dma */ +/* 0x0000: ctx_dma_query */ + 0x00000000, +/* 0x0004: ctx_dma_src */ + 0x00000000, +/* 0x0008: ctx_dma_dst */ + 0x00000000, +/* 0x000c: ctx_query_address_high */ + 0x00000000, +/* 0x0010: ctx_query_address_low */ + 0x00000000, +/* 0x0014: ctx_query_counter */ + 0x00000000, +/* 0x0018: ctx_cond_address_high */ + 0x00000000, +/* 0x001c: ctx_cond_address_low */ + 0x00000000, +/* 0x0020: ctx_cond_off */ + 0x00000000, +/* 0x0024: ctx_src_address_high */ + 0x00000000, +/* 0x0028: 
ctx_src_address_low */ + 0x00000000, +/* 0x002c: ctx_dst_address_high */ + 0x00000000, +/* 0x0030: ctx_dst_address_low */ + 0x00000000, +/* 0x0034: ctx_mode */ + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0040: ctx_key */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0050: ctx_iv */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0080: swap */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x00a0: common_cmd_dtable */ + 0x0002000c, + 0xffffff00, + 0x00020010, + 0x0000000f, + 0x00020014, + 0x00000000, + 0x00000192, + 0xfffffffe, + 0x00020018, + 0xffffff00, + 0x0002001c, + 0x0000000f, + 0x000001d7, + 0xfffffff8, + 0x00000260, + 0xffffffff, +/* 0x00e0: engine_cmd_dtable */ + 0x00020040, + 0x00000000, + 0x00020044, + 0x00000000, + 0x00020048, + 0x00000000, + 0x0002004c, + 0x00000000, + 0x00020050, + 0x00000000, + 0x00020054, + 0x00000000, + 0x00020058, + 0x00000000, + 0x0002005c, + 0x00000000, + 0x00020024, + 0xffffff00, + 0x00020028, + 0x0000000f, + 0x0002002c, + 0xffffff00, + 0x00020030, + 0x0000000f, + 0x00000271, + 0xfffffff0, + 0x00010285, + 0xf000000f, +/* 0x0150: crypt_dtable */ + 0x04db0321, + 0x04b1032f, + 0x04db0339, + 0x04db034b, + 0x04db0361, + 0x04db0377, + 0x04db0395, + 0x04db03af, + 0x04db03cd, + 0x04db03e3, + 0x04db03f9, + 0x04db040f, + 0x04830429, + 0x0483043b, + 0x0483045d, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; + +static uint32_t nv98_pcrypt_code[] = { + 0x17f004bd, + 0x0010fe35, + 0xf10004fe, + 0xf0fff017, + 0x27f10013, + 0x21d00400, + 0x0c15f0c0, + 0xf00021d0, + 0x27f10317, + 0x21d01200, + 0x1031f400, +/* 0x002f: spin */ + 0xf40031f4, + 0x0ef40028, +/* 0x0035: ih */ + 0x8001cffd, + 0xb00812c4, + 0x0bf40024, + 0x0027f167, + 0x002bfe77, + 0xf00007fe, + 0x23f00027, + 0x0037f105, + 0x0034cf14, + 0xb0014594, + 0x18f40055, + 0x0602fa17, + 0x4af003f8, + 0x0034d01e, + 0xd00147f0, + 0x0ef48034, +/* 0x0075: ctxload */ + 0x4034cf33, + 0xb0014f94, + 0x18f400f5, + 0x0502fa21, + 0x57f003f8, + 0x0267f000, +/* 0x008c: ctxload_dma_loop */ + 0xa07856bc, + 0xb6018068, + 0x87d00884, + 0x0162b600, +/* 0x009f: dummyload */ + 0xf0f018f4, + 0x35d00257, +/* 0x00a5: noctx */ + 0x0412c480, + 0xf50024b0, + 0xf100df0b, + 0xcf190037, + 0x33cf4032, + 0xff24e400, + 0x1024b607, + 0x07bf45e4, + 0xf50054b0, + 0xf100b90b, + 0xf1fae057, + 0xb000ce67, + 0x18f4c044, + 0xa057f14d, + 0x8867f1fc, + 0x8044b000, + 0xb03f18f4, + 0x18f46044, + 0x5044b019, + 0xf1741bf4, + 0xbd220027, + 0x0233f034, + 0xf50023d0, +/* 0x0103: dma_cmd */ + 0xb000810e, + 0x18f46344, + 0x0245945e, + 0xfe8050b7, + 0x801e39f0, + 0x40b70053, + 0x44b60120, + 0x0043d008, +/* 0x0123: dtable_cmd */ + 0xb8600ef4, + 0x18f40446, + 0x0344b63e, + 0x980045bb, + 0x53fd0145, + 0x0054b004, + 0x58291bf4, + 0x46580045, + 0x0264b001, + 0x98170bf4, + 0x67fd0807, + 0x0164b004, + 0xf9300bf4, + 0x0f01f455, +/* 0x015b: cmd_setctx */ + 0x80280ef4, + 0x0ef40053, +/* 0x0161: invalid_bitfield */ + 0x0125f022, +/* 0x0164: dispatch_error */ +/* 0x0164: illegal_mthd */ + 0x100047f1, + 0xd00042d0, + 
0x47f04043, + 0x0004d040, +/* 0x0174: im_loop */ + 0xf08004cf, + 0x44b04044, + 0xf71bf400, +/* 0x0180: cmddone */ + 0x1d0037f1, + 0xd00147f0, +/* 0x018a: nocmd */ + 0x11c40034, + 0x4001d00c, +/* 0x0192: cmd_query_get */ + 0x38f201f8, + 0x0325f001, + 0x0b0047f1, +/* 0x019c: ptimer_retry */ + 0xcf4046cf, + 0x47cf0045, + 0x0467b840, + 0x98f41bf4, + 0x04800504, + 0x21008020, + 0x80220580, + 0x0bfe2306, + 0x03049800, + 0xfe1844b6, + 0x04980047, + 0x8057f104, + 0x0253f000, + 0xf80645fa, +/* 0x01d7: cmd_cond_mode */ + 0xf400f803, + 0x25f00131, + 0x0534b002, + 0xf41218f4, + 0x34b00132, + 0x0b18f402, + 0x800136f0, +/* 0x01f2: return */ + 0x00f80803, +/* 0x01f4: cmd_cond_mode_queryful */ + 0x98060498, + 0x56c40705, + 0x0855b6ff, + 0xfd1844b6, + 0x47fe0545, + 0x000bfe00, + 0x008057f1, + 0xfa0253f0, + 0x34b00565, + 0x131bf402, + 0x049803f8, + 0x0044b021, + 0x800b4cf0, + 0x00f80804, +/* 0x022c: cmd_cond_mode_double */ + 0xb61060b6, + 0x65fa1050, + 0x9803f805, + 0x06982005, + 0x0456b824, + 0x980b4cf0, + 0x06982105, + 0x0456b825, + 0xfd0b5cf0, + 0x34b00445, + 0x0b5cf003, + 0x800645fd, + 0x00f80804, +/* 0x0260: cmd_wrcache_flush */ + 0xf10132f4, + 0xbd220027, + 0x0133f034, + 0xf80023d0, +/* 0x0271: crypt_cmd_mode */ + 0x0131f400, + 0xb00225f0, + 0x18f40f34, + 0x0132f409, +/* 0x0283: crypt_cmd_mode_return */ + 0xf80d0380, +/* 0x0285: crypt_cmd_length */ + 0x0034b000, + 0xf4fb0bf4, + 0x47f0033c, + 0x0743f040, + 0xf00604fa, + 0x43f05047, + 0x0604fa06, + 0x3cf503f8, + 0x47f1c407, + 0x4bfe2100, + 0x09049800, + 0x950a0598, + 0x44b60858, + 0x0548fd18, + 0x98ff55c4, + 0x07980b06, + 0x0878950c, + 0xfd1864b6, + 0x77c40568, + 0x0d0898ff, + 0x580284b6, + 0x95f9a889, + 0xf9a98958, + 0x013cf495, + 0x3cf403f8, + 0xf803f861, + 0x18489503, + 0xbb084994, + 0x81b60095, + 0x09088000, + 0x950a0980, + 0x69941868, + 0x0097bb08, + 0x800081b6, + 0x09800b08, + 0x023cf40c, + 0xf05047f0, + 0x04fa0643, + 0xf803f805, +/* 0x0321: crypt_copy_prep */ + 0x203cf500, + 0x003cf594, + 0x003cf588, +/* 0x032f: crypt_store_prep */ + 0xf500f88c, + 0xf594103c, + 0xf88c063c, +/* 0x0339: crypt_ecb_e_prep */ + 0x303cf500, + 0x003cf594, + 0x003cf588, + 0x003cf5d0, +/* 0x034b: crypt_ecb_d_prep */ + 0xf500f88c, + 0xf5c8773c, + 0xf594303c, + 0xf588003c, + 0xf5d4003c, + 0xf88c003c, +/* 0x0361: crypt_cbc_e_prep */ + 0x403cf500, + 0x003cf594, + 0x063cf588, + 0x663cf5ac, + 0x063cf5d0, +/* 0x0377: crypt_cbc_d_prep */ + 0xf500f88c, + 0xf5c8773c, + 0xf594503c, + 0xf584623c, + 0xf588063c, + 0xf5d4603c, + 0xf5ac203c, + 0xf88c003c, +/* 0x0395: crypt_pcbc_e_prep */ + 0x503cf500, + 0x003cf594, + 0x063cf588, + 0x663cf5ac, + 0x063cf5d0, + 0x063cf58c, +/* 0x03af: crypt_pcbc_d_prep */ + 0xf500f8ac, + 0xf5c8773c, + 0xf594503c, + 0xf588003c, + 0xf5d4013c, + 0xf5ac163c, + 0xf58c063c, + 0xf8ac063c, +/* 0x03cd: crypt_cfb_e_prep */ + 0x403cf500, + 0x663cf594, + 0x003cf5d0, + 0x063cf588, + 0x063cf5ac, +/* 0x03e3: crypt_cfb_d_prep */ + 0xf500f88c, + 0xf594403c, + 0xf5d0603c, + 0xf588063c, + 0xf5ac603c, + 0xf88c003c, +/* 0x03f9: crypt_ofb_prep */ + 0x403cf500, + 0x663cf594, + 0x003cf5d0, + 0x603cf588, + 0x003cf5ac, +/* 0x040f: crypt_ctr_prep */ + 0xf500f88c, + 0xf594503c, + 0xf5d0613c, + 0xf5b0163c, + 0xf588003c, + 0xf5ac103c, + 0xf88c003c, +/* 0x0429: crypt_cbc_mac_prep */ + 0x303cf500, + 0x003cf594, + 0x063cf588, + 0x663cf5ac, +/* 0x043b: crypt_cmac_finish_complete_prep */ + 0xf500f8d0, + 0xf594703c, + 0xf588003c, + 0xf5ac063c, + 0xf5ac003c, + 0xf5d0003c, + 0xf5bc003c, + 0xf5ac063c, + 0xf8d0663c, +/* 0x045d: crypt_cmac_finish_partial_prep */ + 0x803cf500, + 0x003cf594, + 
0x063cf588, + 0x003cf5ac, + 0x003cf5ac, + 0x003cf5d0, + 0x003cf5bc, + 0x063cf5bc, + 0x663cf5ac, +/* 0x0483: crypt_do_in */ + 0xbb00f8d0, + 0x47fe0035, + 0x8097f100, + 0x0293f000, +/* 0x0490: crypt_do_in_loop */ + 0xf80559fa, + 0x223cf403, + 0xf50609fa, + 0xf898103c, + 0x1050b603, + 0xf40453b8, + 0x3cf4e91b, + 0xf803f801, +/* 0x04b1: crypt_do_out */ + 0x0037bb00, + 0xf10067fe, + 0xf0008097, +/* 0x04be: crypt_do_out_loop */ + 0x3cf50293, + 0x3cf49810, + 0x0579fa61, + 0xf40679fa, + 0x03f8013c, + 0xb81070b6, + 0x1bf40473, +/* 0x04db: crypt_do_inout */ + 0xbb00f8e8, + 0x97f10035, + 0x93f00080, +/* 0x04e5: crypt_do_inout_loop */ + 0x0047fe02, + 0xf80559fa, + 0x213cf403, + 0xf50609fa, + 0xf498103c, + 0x67fe613c, + 0x0579fa00, + 0xf40679fa, + 0x03f8013c, + 0xb61050b6, + 0x53b81070, + 0xd41bf404, + 0x000000f8, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c new file mode 100644 index 0000000..5bc021f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c @@ -0,0 +1,197 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/os.h> +#include <core/enum.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/gpuobj.h> + +#include <subdev/fb.h> + +#include <engine/fifo.h> +#include <engine/crypt.h> + +struct nv84_crypt_priv { + struct nouveau_engine base; +}; + +/******************************************************************************* + * Crypt object classes + ******************************************************************************/ + +static int +nv84_crypt_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj *obj; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, + 16, 16, 0, &obj); + *pobject = nv_object(obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, nv_mclass(obj)); + nv_wo32(obj, 0x04, 0x00000000); + nv_wo32(obj, 0x08, 0x00000000); + nv_wo32(obj, 0x0c, 0x00000000); + return 0; +} + +static struct nouveau_ofuncs +nv84_crypt_ofuncs = { + .ctor = nv84_crypt_object_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, +}; + +static struct nouveau_oclass +nv84_crypt_sclass[] = { + { 0x74c1, &nv84_crypt_ofuncs }, + {} +}; + +/******************************************************************************* + * PCRYPT context + ******************************************************************************/ + +static struct nouveau_oclass +nv84_crypt_cclass = { + .handle = NV_ENGCTX(CRYPT, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_engctx_ctor, + .dtor = _nouveau_engctx_dtor, + .init = _nouveau_engctx_init, + .fini = _nouveau_engctx_fini, + .rd32 = _nouveau_engctx_rd32, + .wr32 = _nouveau_engctx_wr32, + }, +}; + +/******************************************************************************* + * PCRYPT engine/subdev functions + ******************************************************************************/ + +static const struct nouveau_bitfield nv84_crypt_intr_mask[] = { + { 0x00000001, "INVALID_STATE" }, + { 0x00000002, "ILLEGAL_MTHD" }, + { 0x00000004, "ILLEGAL_CLASS" }, + { 0x00000080, "QUERY" }, + { 0x00000100, "FAULT" }, + {} +}; + +static void +nv84_crypt_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nv84_crypt_priv *priv = (void *)subdev; + u32 stat = nv_rd32(priv, 0x102130); + u32 mthd = nv_rd32(priv, 0x102190); + u32 data = nv_rd32(priv, 0x102194); + u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff; + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat) { + nv_error(priv, "%s", ""); + nouveau_bitfield_print(nv84_crypt_intr_mask, stat); + pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n", + chid, (u64)inst << 12, nouveau_client_name(engctx), + mthd, data); + } + + nv_wr32(priv, 0x102130, stat); + nv_wr32(priv, 0x10200c, 0x10); + + nouveau_engctx_put(engctx); +} + +static int +nv84_crypt_tlb_flush(struct nouveau_engine *engine) +{ + nv50_vm_flush_engine(&engine->base, 0x0a); + return 0; +} + +static int +nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_crypt_priv *priv; 
+ int ret; + + ret = nouveau_engine_create(parent, engine, oclass, true, + "PCRYPT", "crypt", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00004000; + nv_subdev(priv)->intr = nv84_crypt_intr; + nv_engine(priv)->cclass = &nv84_crypt_cclass; + nv_engine(priv)->sclass = nv84_crypt_sclass; + nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush; + return 0; +} + +static int +nv84_crypt_init(struct nouveau_object *object) +{ + struct nv84_crypt_priv *priv = (void *)object; + int ret; + + ret = nouveau_engine_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x102130, 0xffffffff); + nv_wr32(priv, 0x102140, 0xffffffbf); + nv_wr32(priv, 0x10200c, 0x00000010); + return 0; +} + +struct nouveau_oclass +nv84_crypt_oclass = { + .handle = NV_ENGINE(CRYPT, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_crypt_ctor, + .dtor = _nouveau_engine_dtor, + .init = nv84_crypt_init, + .fini = _nouveau_engine_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c new file mode 100644 index 0000000..8bf8955 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c @@ -0,0 +1,165 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/os.h> +#include <core/enum.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/falcon.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/fifo.h> +#include <engine/crypt.h> + +#include "fuc/nv98.fuc.h" + +struct nv98_crypt_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * Crypt object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv98_crypt_sclass[] = { + { 0x88b4, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PCRYPT context + ******************************************************************************/ + +static struct nouveau_oclass +nv98_crypt_cclass = { + .handle = NV_ENGCTX(CRYPT, 0x98), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + }, +}; + +/******************************************************************************* + * PCRYPT engine/subdev functions + ******************************************************************************/ + +static const struct nouveau_enum nv98_crypt_isr_error_name[] = { + { 0x0000, "ILLEGAL_MTHD" }, + { 0x0001, "INVALID_BITFIELD" }, + { 0x0002, "INVALID_ENUM" }, + { 0x0003, "QUERY" }, + {} +}; + +static void +nv98_crypt_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nv98_crypt_priv *priv = (void *)subdev; + u32 disp = nv_rd32(priv, 0x08701c); + u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16); + u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff; + u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff; + u32 addr = nv_rd32(priv, 0x087040) >> 16; + u32 mthd = (addr & 0x07ff) << 2; + u32 subc = (addr & 0x3800) >> 11; + u32 data = nv_rd32(priv, 0x087044); + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & 0x00000040) { + nv_error(priv, "DISPATCH_ERROR ["); + nouveau_enum_print(nv98_crypt_isr_error_name, ssta); + pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n", + chid, (u64)inst << 12, nouveau_client_name(engctx), + subc, mthd, data); + nv_wr32(priv, 0x087004, 0x00000040); + stat &= ~0x00000040; + } + + if (stat) { + nv_error(priv, "unhandled intr 0x%08x\n", stat); + nv_wr32(priv, 0x087004, stat); + } + + nouveau_engctx_put(engctx); +} + +static int +nv98_crypt_tlb_flush(struct nouveau_engine *engine) +{ + nv50_vm_flush_engine(&engine->base, 0x0a); + return 0; +} + +static int +nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_crypt_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true, + "PCRYPT", "crypt", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00004000; + nv_subdev(priv)->intr = nv98_crypt_intr; + nv_engine(priv)->cclass = &nv98_crypt_cclass; + nv_engine(priv)->sclass = nv98_crypt_sclass; + nv_engine(priv)->tlb_flush = 
nv98_crypt_tlb_flush; + nv_falcon(priv)->code.data = nv98_pcrypt_code; + nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code); + nv_falcon(priv)->data.data = nv98_pcrypt_data; + nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data); + return 0; +} + +struct nouveau_oclass +nv98_crypt_oclass = { + .handle = NV_ENGINE(CRYPT, 0x98), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv98_crypt_ctor, + .dtor = _nouveau_falcon_dtor, + .init = _nouveau_falcon_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c new file mode 100644 index 0000000..4c72571 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c @@ -0,0 +1,477 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/device.h> +#include <core/client.h> +#include <core/option.h> + +#include <core/class.h> + +#include <engine/device.h> + +static DEFINE_MUTEX(nv_devices_mutex); +static LIST_HEAD(nv_devices); + +struct nouveau_device * +nouveau_device_find(u64 name) +{ + struct nouveau_device *device, *match = NULL; + mutex_lock(&nv_devices_mutex); + list_for_each_entry(device, &nv_devices, head) { + if (device->handle == name) { + match = device; + break; + } + } + mutex_unlock(&nv_devices_mutex); + return match; +} + +/****************************************************************************** + * nouveau_devobj (0x0080): class implementation + *****************************************************************************/ +struct nouveau_devobj { + struct nouveau_parent base; + struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; +}; + +static const u64 disable_map[] = { + [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, + [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, + [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, + [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, + [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, + [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO, + [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, + [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, + [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, + [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP, + [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT, + [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP, + [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP, + [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, + [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, + [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1, + [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC, + [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, + [NVDEV_SUBDEV_NR] = 0, +}; + +static int +nouveau_devobj_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_client *client = nv_client(parent); + struct nouveau_device *device; + struct nouveau_devobj *devobj; + struct nv_device_class *args = data; + u32 boot0, strap; + u64 disable, mmio_base, mmio_size; + void __iomem *map; + int ret, i, c; + + if (size < sizeof(struct nv_device_class)) + return -EINVAL; + + /* find the device subdev that matches what the client requested */ + device = nv_device(client->device); + if (args->device != ~0) { + device = nouveau_device_find(args->device); + if (!device) + return -ENODEV; + } + + ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_FIFO) | + (1ULL << NVDEV_ENGINE_DISP), &devobj); + *pobject = nv_object(devobj); + if (ret) + return ret; + + mmio_base = pci_resource_start(device->pdev, 0); + mmio_size = 
pci_resource_len(device->pdev, 0); + + /* translate api disable mask into internal mapping */ + disable = args->debug0; + for (i = 0; i < NVDEV_SUBDEV_NR; i++) { + if (args->disable & disable_map[i]) + disable |= (1ULL << i); + } + + /* identify the chipset, and determine classes of subdev/engines */ + if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) && + !device->card_type) { + map = ioremap(mmio_base, 0x102000); + if (map == NULL) + return -ENOMEM; + + /* switch mmio to cpu's native endianness */ +#ifndef __BIG_ENDIAN + if (ioread32_native(map + 0x000004) != 0x00000000) +#else + if (ioread32_native(map + 0x000004) == 0x00000000) +#endif + iowrite32_native(0x01000001, map + 0x000004); + + /* read boot0 and strapping information */ + boot0 = ioread32_native(map + 0x000000); + strap = ioread32_native(map + 0x101000); + iounmap(map); + + /* determine chipset and derive architecture from it */ + if ((boot0 & 0x0f000000) > 0) { + device->chipset = (boot0 & 0xff00000) >> 20; + switch (device->chipset & 0xf0) { + case 0x10: device->card_type = NV_10; break; + case 0x20: device->card_type = NV_20; break; + case 0x30: device->card_type = NV_30; break; + case 0x40: + case 0x60: device->card_type = NV_40; break; + case 0x50: + case 0x80: + case 0x90: + case 0xa0: device->card_type = NV_50; break; + case 0xc0: device->card_type = NV_C0; break; + case 0xd0: device->card_type = NV_D0; break; + case 0xe0: + case 0xf0: device->card_type = NV_E0; break; + default: + break; + } + } else + if ((boot0 & 0xff00fff0) == 0x20004000) { + if (boot0 & 0x00f00000) + device->chipset = 0x05; + else + device->chipset = 0x04; + device->card_type = NV_04; + } + + switch (device->card_type) { + case NV_04: ret = nv04_identify(device); break; + case NV_10: ret = nv10_identify(device); break; + case NV_20: ret = nv20_identify(device); break; + case NV_30: ret = nv30_identify(device); break; + case NV_40: ret = nv40_identify(device); break; + case NV_50: ret = nv50_identify(device); break; + case NV_C0: + case NV_D0: ret = nvc0_identify(device); break; + case NV_E0: ret = nve0_identify(device); break; + default: + ret = -EINVAL; + break; + } + + if (ret) { + nv_error(device, "unknown chipset, 0x%08x\n", boot0); + return ret; + } + + nv_info(device, "BOOT0 : 0x%08x\n", boot0); + nv_info(device, "Chipset: %s (NV%02X)\n", + device->cname, device->chipset); + nv_info(device, "Family : NV%02X\n", device->card_type); + + /* determine frequency of timing crystal */ + if ( device->chipset < 0x17 || + (device->chipset >= 0x20 && device->chipset < 0x25)) + strap &= 0x00000040; + else + strap &= 0x00400040; + + switch (strap) { + case 0x00000000: device->crystal = 13500; break; + case 0x00000040: device->crystal = 14318; break; + case 0x00400000: device->crystal = 27000; break; + case 0x00400040: device->crystal = 25000; break; + } + + nv_debug(device, "crystal freq: %dKHz\n", device->crystal); + } + + if (!(args->disable & NV_DEVICE_DISABLE_MMIO) && + !nv_subdev(device)->mmio) { + nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size); + if (!nv_subdev(device)->mmio) { + nv_error(device, "unable to map device registers\n"); + return -ENOMEM; + } + } + + /* ensure requested subsystems are available for use */ + for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) { + if (!(oclass = device->oclass[i]) || (disable & (1ULL << i))) + continue; + + if (device->subdev[i]) { + nouveau_object_ref(device->subdev[i], + &devobj->subdev[i]); + continue; + } + + ret = nouveau_object_ctor(nv_object(device), NULL, + oclass, NULL, i, + &devobj->subdev[i]); 
+ if (ret == -ENODEV) + continue; + if (ret) + return ret; + + /* note: can't init *any* subdevs until devinit has been run + * due to not knowing exactly what the vbios init tables will + * mess with. devinit also can't be run until all of its + * dependencies have been created. + * + * this code delays init of any subdev until all of devinit's + * dependencies have been created, and then initialises each + * subdev in turn as they're created. + */ + while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) { + struct nouveau_object *subdev = devobj->subdev[c++]; + if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) { + ret = nouveau_object_inc(subdev); + if (ret) + return ret; + atomic_dec(&nv_object(device)->usecount); + } else + if (subdev) { + nouveau_subdev_reset(subdev); + } + } + } + + return 0; +} + +static void +nouveau_devobj_dtor(struct nouveau_object *object) +{ + struct nouveau_devobj *devobj = (void *)object; + int i; + + for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) + nouveau_object_ref(NULL, &devobj->subdev[i]); + + nouveau_parent_destroy(&devobj->base); +} + +static u8 +nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) +{ + return nv_rd08(object->engine, addr); +} + +static u16 +nouveau_devobj_rd16(struct nouveau_object *object, u64 addr) +{ + return nv_rd16(object->engine, addr); +} + +static u32 +nouveau_devobj_rd32(struct nouveau_object *object, u64 addr) +{ + return nv_rd32(object->engine, addr); +} + +static void +nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data) +{ + nv_wr08(object->engine, addr, data); +} + +static void +nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data) +{ + nv_wr16(object->engine, addr, data); +} + +static void +nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + nv_wr32(object->engine, addr, data); +} + +static struct nouveau_ofuncs +nouveau_devobj_ofuncs = { + .ctor = nouveau_devobj_ctor, + .dtor = nouveau_devobj_dtor, + .init = _nouveau_parent_init, + .fini = _nouveau_parent_fini, + .rd08 = nouveau_devobj_rd08, + .rd16 = nouveau_devobj_rd16, + .rd32 = nouveau_devobj_rd32, + .wr08 = nouveau_devobj_wr08, + .wr16 = nouveau_devobj_wr16, + .wr32 = nouveau_devobj_wr32, +}; + +/****************************************************************************** + * nouveau_device: engine functions + *****************************************************************************/ +static struct nouveau_oclass +nouveau_device_sclass[] = { + { 0x0080, &nouveau_devobj_ofuncs }, + {} +}; + +static int +nouveau_device_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_device *device = (void *)object; + struct nouveau_object *subdev; + int ret, i; + + for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) { + if ((subdev = device->subdev[i])) { + if (!nv_iclass(subdev, NV_ENGINE_CLASS)) { + ret = nouveau_object_dec(subdev, suspend); + if (ret && suspend) + goto fail; + } + } + } + + ret = 0; +fail: + for (; ret && i < NVDEV_SUBDEV_NR; i++) { + if ((subdev = device->subdev[i])) { + if (!nv_iclass(subdev, NV_ENGINE_CLASS)) { + ret = nouveau_object_inc(subdev); + if (ret) { + /* XXX */ + } + } + } + } + + return ret; +} + +static int +nouveau_device_init(struct nouveau_object *object) +{ + struct nouveau_device *device = (void *)object; + struct nouveau_object *subdev; + int ret, i; + + for (i = 0; i < NVDEV_SUBDEV_NR; i++) { + if ((subdev = device->subdev[i])) { + if (!nv_iclass(subdev, NV_ENGINE_CLASS)) { + ret = nouveau_object_inc(subdev); + if (ret) + goto fail; + } else { + 
nouveau_subdev_reset(subdev); + } + } + } + + ret = 0; +fail: + for (--i; ret && i >= 0; i--) { + if ((subdev = device->subdev[i])) { + if (!nv_iclass(subdev, NV_ENGINE_CLASS)) + nouveau_object_dec(subdev, false); + } + } + + return ret; +} + +static void +nouveau_device_dtor(struct nouveau_object *object) +{ + struct nouveau_device *device = (void *)object; + + mutex_lock(&nv_devices_mutex); + list_del(&device->head); + mutex_unlock(&nv_devices_mutex); + + if (nv_subdev(device)->mmio) + iounmap(nv_subdev(device)->mmio); + + nouveau_engine_destroy(&device->base); +} + +static struct nouveau_oclass +nouveau_device_oclass = { + .handle = NV_ENGINE(DEVICE, 0x00), + .ofuncs = &(struct nouveau_ofuncs) { + .dtor = nouveau_device_dtor, + .init = nouveau_device_init, + .fini = nouveau_device_fini, + }, +}; + +int +nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname, + const char *cfg, const char *dbg, + int length, void **pobject) +{ + struct nouveau_device *device; + int ret = -EEXIST; + + mutex_lock(&nv_devices_mutex); + list_for_each_entry(device, &nv_devices, head) { + if (device->handle == name) + goto done; + } + + ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true, + "DEVICE", "device", length, pobject); + device = *pobject; + if (ret) + goto done; + + device->pdev = pdev; + device->handle = name; + device->cfgopt = cfg; + device->dbgopt = dbg; + device->name = sname; + + nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); + nv_engine(device)->sclass = nouveau_device_sclass; + list_add(&device->head, &nv_devices); +done: + mutex_unlock(&nv_devices_mutex); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c new file mode 100644 index 0000000..a0284cf --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c @@ -0,0 +1,89 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/disp.h> + +int +nv04_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0x04: + device->cname = "NV04"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x05: + device->cname = "NV05"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + default: + nv_fatal(device, "unknown RIVA chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c new file mode 100644 index 0000000..1b7809a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c @@ -0,0 +1,204 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/disp.h> + +int +nv10_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0x10: + device->cname = "NV10"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x15: + device->cname = "NV15"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x16: + device->cname = "NV16"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + 
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x1a: + device->cname = "nForce"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x11: + device->cname = "NV11"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x17: + device->cname = "NV17"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x1f: + device->cname = "nForce2"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + 
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x18: + device->cname = "NV18"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + default: + nv_fatal(device, "unknown Celsius chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c new file mode 100644 index 0000000..12a4005 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c @@ -0,0 +1,131 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/therm.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/disp.h> + +int +nv20_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0x20: + device->cname = "NV20"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x25: + device->cname = "NV25"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x28: + device->cname = "NV28"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + 
break; + case 0x2a: + device->cname = "NV2A"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + default: + nv_fatal(device, "unknown Kelvin chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c new file mode 100644 index 0000000..cef0f1e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c @@ -0,0 +1,153 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/mpeg.h> +#include <engine/disp.h> + +int +nv30_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0x30: + device->cname = "NV30"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x35: + device->cname = "NV35"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv04_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x31: + device->cname = "NV31"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; + 
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x36: + device->cname = "NV36"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x34: + device->cname = "NV34"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + default: + nv_fatal(device, "unknown Rankine chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c new file mode 100644 index 0000000..1719cb0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c @@ -0,0 +1,393 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/vm.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/therm.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/mpeg.h> +#include <engine/disp.h> + +int +nv40_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0x40: + device->cname = "NV40"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x41: + device->cname = "NV41"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x42: + device->cname = "NV42"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + 
device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x43: + device->cname = "NV43"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x45: + device->cname = "NV45"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x47: + device->cname = "G70"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass; + 
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x49: + device->cname = "G71"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x4b: + device->cname = "G73"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x44: + device->cname = "NV44"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + 
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x46: + device->cname = "G72"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x4a: + device->cname = "NV44A"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x4c: + device->cname = "C61"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + 
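	/*
	 * Reading note, not part of the upstream file: every chipset case in
	 * these identify() routines follows the same table-fill pattern.  A
	 * case only records which implementation class handles each subdev and
	 * engine slot in device->oclass[]; the core instantiates those classes
	 * later, when the device object is constructed.  A chipset with no
	 * matching case falls through to the default branch and the probe
	 * fails with -EINVAL.
	 */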
case 0x4e: + device->cname = "C51"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x63: + device->cname = "C73"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x67: + device->cname = "C67"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + case 0x68: + device->cname = "C68"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; + 
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv31_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass; + break; + default: + nv_fatal(device, "unknown Curie chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c new file mode 100644 index 0000000..5e8c3de --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c @@ -0,0 +1,425 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/therm.h> +#include <subdev/mxm.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> +#include <subdev/bar.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/mpeg.h> +#include <engine/vp.h> +#include <engine/crypt.h> +#include <engine/bsp.h> +#include <engine/ppp.h> +#include <engine/copy.h> +#include <engine/disp.h> + +int +nv50_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0x50: + device->cname = "G80"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; + break; + case 0x84: + device->cname = "G84"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + break; + case 0x86: + device->cname = "G86"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = 
&nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + break; + case 0x92: + device->cname = "G92"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass; + break; + case 0x94: + device->cname = "G94"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + 
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + break; + case 0x96: + device->cname = "G96"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + break; + case 0x98: + device->cname = "G98"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + break; + case 0xa0: + device->cname = "G200"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv50_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = 
&nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass; + break; + case 0xaa: + device->cname = "MCP77/MCP78"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + break; + case 0xac: + device->cname = "MCP79/MCP7A"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = 
&nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass; + break; + case 0xa3: + device->cname = "GT215"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xa5: + device->cname = "GT216"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xa8: + device->cname = "GT218"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + 
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xaf: + device->cname = "MCP89"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nv50_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + default: + nv_fatal(device, "unknown Tesla chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c new file mode 100644 index 0000000..a36e64e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -0,0 +1,320 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/therm.h> +#include <subdev/mxm.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/ltcg.h> +#include <subdev/ibus.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> +#include <subdev/bar.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/vp.h> +#include <engine/bsp.h> +#include <engine/ppp.h> +#include <engine/copy.h> +#include <engine/disp.h> + +int +nvc0_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0xc0: + device->cname = "GF100"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xc4: + device->cname = "GF104"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + 
device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xc3: + device->cname = "GF106"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xce: + device->cname = "GF114"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + 
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xcf: + device->cname = "GF116"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xc1: + device->cname = "GF108"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xc8: + device->cname = "GF110"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = 
&nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nv94_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; + break; + case 0xd9: + device->cname = "GF119"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; + break; + case 0xd7: + device->cname = "GF117"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + 
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; + break; + default: + nv_fatal(device, "unknown Fermi chipset\n"); + return -EINVAL; + } + + return 0; + } diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c new file mode 100644 index 0000000..a354e40 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c @@ -0,0 +1,184 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bus.h> +#include <subdev/gpio.h> +#include <subdev/i2c.h> +#include <subdev/clock.h> +#include <subdev/therm.h> +#include <subdev/mxm.h> +#include <subdev/devinit.h> +#include <subdev/mc.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/ltcg.h> +#include <subdev/ibus.h> +#include <subdev/instmem.h> +#include <subdev/vm.h> +#include <subdev/bar.h> + +#include <engine/device.h> +#include <engine/dmaobj.h> +#include <engine/fifo.h> +#include <engine/software.h> +#include <engine/graph.h> +#include <engine/disp.h> +#include <engine/copy.h> +#include <engine/bsp.h> +#include <engine/vp.h> +#include <engine/ppp.h> + +int +nve0_identify(struct nouveau_device *device) +{ + switch (device->chipset) { + case 0xe4: + device->cname = "GK104"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + break; + case 0xe7: + device->cname = "GK107"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = 
&nve0_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + break; + case 0xe6: + device->cname = "GK106"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + break; + case 0xf0: + device->cname = "GK110"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = &nve0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = &nvd0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = &nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; +#endif + device->oclass[NVDEV_ENGINE_DISP ] = &nvf0_disp_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; + device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = 
&nvc0_ppp_oclass; +#endif + break; + default: + nv_fatal(device, "unknown Kepler chipset\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c new file mode 100644 index 0000000..7a5cae4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c @@ -0,0 +1,52 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <engine/disp.h> + +void +_nouveau_disp_dtor(struct nouveau_object *object) +{ + struct nouveau_disp *disp = (void *)object; + nouveau_event_destroy(&disp->vblank); + nouveau_engine_destroy(&disp->base); +} + +int +nouveau_disp_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int heads, + const char *intname, const char *extname, + int length, void **pobject) +{ + struct nouveau_disp *disp; + int ret; + + ret = nouveau_engine_create_(parent, engine, oclass, true, + intname, extname, length, pobject); + disp = *pobject; + if (ret) + return ret; + + return nouveau_event_create(heads, &disp->vblank); +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c new file mode 100644 index 0000000..a66b27c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c @@ -0,0 +1,98 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/timer.h> + +#include "nv50.h" + +int +nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) +{ + const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) | + (data & NV50_DISP_DAC_PWR_VSYNC) | + (data & NV50_DISP_DAC_PWR_DATA) | + (data & NV50_DISP_DAC_PWR_STATE); + const u32 doff = (or * 0x800); + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); + nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); + return 0; +} + +int +nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) +{ + const u32 doff = (or * 0x800); + + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); + + nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval); + mdelay(9); + udelay(500); + loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000); + + nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); + + nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval); + if (!(loadval & 0x80000000)) + return -ETIMEDOUT; + + return (loadval & 0x38000000) >> 27; +} + +int +nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR); + u32 *data = args; + int ret; + + if (size < sizeof(u32)) + return -EINVAL; + + switch (mthd & ~0x3f) { + case NV50_DISP_DAC_PWR: + ret = priv->dac.power(priv, or, data[0]); + break; + case NV50_DISP_DAC_LOAD: + ret = priv->dac.sense(priv, or, data[0]); + if (ret >= 0) { + data[0] = ret; + ret = 0; + } + break; + default: + BUG_ON(1); + } + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c new file mode 100644 index 0000000..31cc8fe --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c @@ -0,0 +1,346 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/dp.h> +#include <subdev/bios/init.h> +#include <subdev/i2c.h> + +#include <engine/disp.h> + +#include "dport.h" + +#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \ + dp->outp->hasht, dp->outp->hashm, ##args) +#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \ + dp->outp->hasht, dp->outp->hashm, ##args) + +/****************************************************************************** + * link training + *****************************************************************************/ +struct dp_state { + const struct nouveau_dp_func *func; + struct nouveau_disp *disp; + struct dcb_output *outp; + struct nvbios_dpout info; + u8 version; + struct nouveau_i2c_port *aux; + int head; + u8 dpcd[4]; + int link_nr; + u32 link_bw; + u8 stat[6]; + u8 conf[4]; +}; + +static int +dp_set_link_config(struct dp_state *dp) +{ + struct nouveau_disp *disp = dp->disp; + struct nouveau_bios *bios = nouveau_bios(disp); + struct nvbios_init init = { + .subdev = nv_subdev(dp->disp), + .bios = bios, + .offset = 0x0000, + .outp = dp->outp, + .crtc = dp->head, + .execute = 1, + }; + u32 lnkcmp; + u8 sink[2]; + + DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); + + /* set desired link configuration on the sink */ + sink[0] = dp->link_bw / 27000; + sink[1] = dp->link_nr; + if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) + sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN; + + nv_wraux(dp->aux, DPCD_LC00, sink, 2); + + /* set desired link configuration on the source */ + if ((lnkcmp = dp->info.lnkcmp)) { + if (dp->version < 0x30) { + while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp)) + lnkcmp += 4; + init.offset = nv_ro16(bios, lnkcmp + 2); + } else { + while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp)) + lnkcmp += 3; + init.offset = nv_ro16(bios, lnkcmp + 1); + } + + nvbios_exec(&init); + } + + return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head, + dp->link_nr, dp->link_bw / 27000, + dp->dpcd[DPCD_RC02] & + DPCD_RC02_ENHANCED_FRAME_CAP); +} + +static void +dp_set_training_pattern(struct dp_state *dp, u8 pattern) +{ + u8 sink_tp; + + DBG("training pattern %d\n", pattern); + dp->func->pattern(dp->disp, dp->outp, dp->head, pattern); + + nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1); + sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET; + sink_tp |= pattern; + nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1); +} + +static int +dp_link_train_commit(struct dp_state *dp) +{ + int i; + + for (i = 0; i < dp->link_nr; i++) { + u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; + u8 lpre = (lane & 0x0c) >> 2; + u8 lvsw = (lane & 0x03) >> 0; + + dp->conf[i] = (lpre << 3) | lvsw; + if (lvsw == 3) + dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED; + if (lpre == 3) + dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED; + + DBG("config lane %d %02x\n", i, dp->conf[i]); + dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre); + } + + return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4); +} + +static int +dp_link_train_update(struct dp_state *dp, u32 delay) +{ + int ret; + + udelay(delay); + + ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6); + if (ret) + return ret; + + DBG("status %*ph\n", 6, dp->stat); + return 0; +} + +static int +dp_link_train_cr(struct dp_state *dp) +{ + bool cr_done = false, abort = false; + int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET; + int tries = 0, i; + + dp_set_training_pattern(dp, 1); + + do { + if (dp_link_train_commit(dp) || + 
dp_link_train_update(dp, 100)) + break; + + cr_done = true; + for (i = 0; i < dp->link_nr; i++) { + u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; + if (!(lane & DPCD_LS02_LANE0_CR_DONE)) { + cr_done = false; + if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED) + abort = true; + break; + } + } + + if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) { + voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET; + tries = 0; + } + } while (!cr_done && !abort && ++tries < 5); + + return cr_done ? 0 : -1; +} + +static int +dp_link_train_eq(struct dp_state *dp) +{ + bool eq_done = false, cr_done = true; + int tries = 0, i; + + dp_set_training_pattern(dp, 2); + + do { + if (dp_link_train_update(dp, 400)) + break; + + eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE); + for (i = 0; i < dp->link_nr && eq_done; i++) { + u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; + if (!(lane & DPCD_LS02_LANE0_CR_DONE)) + cr_done = false; + if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) || + !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) + eq_done = false; + } + + if (dp_link_train_commit(dp)) + break; + } while (!eq_done && cr_done && ++tries <= 5); + + return eq_done ? 0 : -1; +} + +static void +dp_link_train_init(struct dp_state *dp, bool spread) +{ + struct nvbios_init init = { + .subdev = nv_subdev(dp->disp), + .bios = nouveau_bios(dp->disp), + .outp = dp->outp, + .crtc = dp->head, + .execute = 1, + }; + + /* set desired spread */ + if (spread) + init.offset = dp->info.script[2]; + else + init.offset = dp->info.script[3]; + nvbios_exec(&init); + + /* pre-train script */ + init.offset = dp->info.script[0]; + nvbios_exec(&init); +} + +static void +dp_link_train_fini(struct dp_state *dp) +{ + struct nvbios_init init = { + .subdev = nv_subdev(dp->disp), + .bios = nouveau_bios(dp->disp), + .outp = dp->outp, + .crtc = dp->head, + .execute = 1, + }; + + /* post-train script */ + init.offset = dp->info.script[1], + nvbios_exec(&init); +} + +int +nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func, + struct dcb_output *outp, int head, u32 datarate) +{ + struct nouveau_bios *bios = nouveau_bios(disp); + struct nouveau_i2c *i2c = nouveau_i2c(disp); + struct dp_state _dp = { + .disp = disp, + .func = func, + .outp = outp, + .head = head, + }, *dp = &_dp; + const u32 bw_list[] = { 270000, 162000, 0 }; + const u32 *link_bw = bw_list; + u8 hdr, cnt, len; + u32 data; + int ret; + + /* find the bios displayport data relevant to this output */ + data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version, + &hdr, &cnt, &len, &dp->info); + if (!data) { + ERR("bios data not found\n"); + return -EINVAL; + } + + /* acquire the aux channel and fetch some info about the display */ + if (outp->location) + dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev)); + else + dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index)); + if (!dp->aux) { + ERR("no aux channel?!\n"); + return -ENODEV; + } + + ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd)); + if (ret) { + ERR("failed to read DPCD\n"); + return ret; + } + + /* adjust required bandwidth for 8B/10B coding overhead */ + datarate = (datarate / 8) * 10; + + /* enable down-spreading and execute pre-train script from vbios */ + dp_link_train_init(dp, dp->dpcd[3] & 0x01); + + /* start off at highest link rate supported by encoder and display */ + while (*link_bw > (dp->dpcd[1] * 27000)) + link_bw++; + + while (link_bw[0]) { + /* find minimum required lane count at this link rate */ + dp->link_nr = dp->dpcd[2] 
& DPCD_RC02_MAX_LANE_COUNT; + while ((dp->link_nr >> 1) * link_bw[0] > datarate) + dp->link_nr >>= 1; + + /* drop link rate to minimum with this lane count */ + while ((link_bw[1] * dp->link_nr) > datarate) + link_bw++; + dp->link_bw = link_bw[0]; + + /* program selected link configuration */ + ret = dp_set_link_config(dp); + if (ret == 0) { + /* attempt to train the link at this configuration */ + memset(dp->stat, 0x00, sizeof(dp->stat)); + if (!dp_link_train_cr(dp) && + !dp_link_train_eq(dp)) + break; + } else + if (ret >= 1) { + /* dp_set_link_config() handled training */ + break; + } + + /* retry at lower rate */ + link_bw++; + } + + /* finish link training */ + dp_set_training_pattern(dp, 0); + + /* execute post-train script from vbios */ + dp_link_train_fini(dp); + return true; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h new file mode 100644 index 0000000..0e1bbd1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h @@ -0,0 +1,78 @@ +#ifndef __NVKM_DISP_DPORT_H__ +#define __NVKM_DISP_DPORT_H__ + +/* DPCD Receiver Capabilities */ +#define DPCD_RC00 0x00000 +#define DPCD_RC00_DPCD_REV 0xff +#define DPCD_RC01 0x00001 +#define DPCD_RC01_MAX_LINK_RATE 0xff +#define DPCD_RC02 0x00002 +#define DPCD_RC02_ENHANCED_FRAME_CAP 0x80 +#define DPCD_RC02_MAX_LANE_COUNT 0x1f +#define DPCD_RC03 0x00003 +#define DPCD_RC03_MAX_DOWNSPREAD 0x01 + +/* DPCD Link Configuration */ +#define DPCD_LC00 0x00100 +#define DPCD_LC00_LINK_BW_SET 0xff +#define DPCD_LC01 0x00101 +#define DPCD_LC01_ENHANCED_FRAME_EN 0x80 +#define DPCD_LC01_LANE_COUNT_SET 0x1f +#define DPCD_LC02 0x00102 +#define DPCD_LC02_TRAINING_PATTERN_SET 0x03 +#define DPCD_LC03(l) ((l) + 0x00103) +#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED 0x20 +#define DPCD_LC03_PRE_EMPHASIS_SET 0x18 +#define DPCD_LC03_MAX_SWING_REACHED 0x04 +#define DPCD_LC03_VOLTAGE_SWING_SET 0x03 + +/* DPCD Link/Sink Status */ +#define DPCD_LS02 0x00202 +#define DPCD_LS02_LANE1_SYMBOL_LOCKED 0x40 +#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE 0x20 +#define DPCD_LS02_LANE1_CR_DONE 0x10 +#define DPCD_LS02_LANE0_SYMBOL_LOCKED 0x04 +#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE 0x02 +#define DPCD_LS02_LANE0_CR_DONE 0x01 +#define DPCD_LS03 0x00203 +#define DPCD_LS03_LANE3_SYMBOL_LOCKED 0x40 +#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE 0x20 +#define DPCD_LS03_LANE3_CR_DONE 0x10 +#define DPCD_LS03_LANE2_SYMBOL_LOCKED 0x04 +#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE 0x02 +#define DPCD_LS03_LANE2_CR_DONE 0x01 +#define DPCD_LS04 0x00204 +#define DPCD_LS04_LINK_STATUS_UPDATED 0x80 +#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED 0x40 +#define DPCD_LS04_INTERLANE_ALIGN_DONE 0x01 +#define DPCD_LS06 0x00206 +#define DPCD_LS06_LANE1_PRE_EMPHASIS 0xc0 +#define DPCD_LS06_LANE1_VOLTAGE_SWING 0x30 +#define DPCD_LS06_LANE0_PRE_EMPHASIS 0x0c +#define DPCD_LS06_LANE0_VOLTAGE_SWING 0x03 +#define DPCD_LS07 0x00207 +#define DPCD_LS07_LANE3_PRE_EMPHASIS 0xc0 +#define DPCD_LS07_LANE3_VOLTAGE_SWING 0x30 +#define DPCD_LS07_LANE2_PRE_EMPHASIS 0x0c +#define DPCD_LS07_LANE2_VOLTAGE_SWING 0x03 + +struct nouveau_disp; +struct dcb_output; + +struct nouveau_dp_func { + int (*pattern)(struct nouveau_disp *, struct dcb_output *, + int head, int pattern); + int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head, + int link_nr, int link_bw, bool enh_frame); + int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head, + int lane, int swing, int preem); +}; + +extern const struct nouveau_dp_func nv94_sor_dp_func; +extern const 
struct nouveau_dp_func nvd0_sor_dp_func; +extern const struct nouveau_dp_func nv50_pior_dp_func; + +int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *, + struct dcb_output *, int, u32); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c new file mode 100644 index 0000000..373dbcc --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c @@ -0,0 +1,48 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include "nv50.h" + +int +nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) +{ + const u32 soff = (or * 0x800); + int i; + + if (data && data[0]) { + for (i = 0; i < size; i++) + nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); + nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); + } else + if (data) { + nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); + } else { + nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c new file mode 100644 index 0000000..dc57e24 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c @@ -0,0 +1,53 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/dp.h> +#include <subdev/bios/init.h> + +#include "nv50.h" + +int +nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) +{ + const u32 soff = (or * 0x030); + int i; + + if (data && data[0]) { + for (i = 0; i < size; i++) + nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); + nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); + } else + if (data) { + nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); + } else { + nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c new file mode 100644 index 0000000..7fdade6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c @@ -0,0 +1,70 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include "nv50.h" + +int +nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) +{ + const u32 hoff = (head * 0x800); + + if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { + nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); + nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); + nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); + return 0; + } + + /* AVI InfoFrame */ + nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); + nv_wr32(priv, 0x616528 + hoff, 0x000d0282); + nv_wr32(priv, 0x61652c + hoff, 0x0000006f); + nv_wr32(priv, 0x616530 + hoff, 0x00000000); + nv_wr32(priv, 0x616534 + hoff, 0x00000000); + nv_wr32(priv, 0x616538 + hoff, 0x00000000); + nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001); + + /* Audio InfoFrame */ + nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); + nv_wr32(priv, 0x616508 + hoff, 0x000a0184); + nv_wr32(priv, 0x61650c + hoff, 0x00000071); + nv_wr32(priv, 0x616510 + hoff, 0x00000000); + nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001); + + nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ + nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ + nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ + + /* ??? 
*/ + nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ + nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ + nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ + + /* HDMI_CTRL */ + nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c new file mode 100644 index 0000000..db8c6fd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c @@ -0,0 +1,70 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include "nv50.h" + +int +nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) +{ + const u32 soff = (or * 0x800); + + if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { + nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); + nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); + nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); + return 0; + } + + /* AVI InfoFrame */ + nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); + nv_wr32(priv, 0x61c528 + soff, 0x000d0282); + nv_wr32(priv, 0x61c52c + soff, 0x0000006f); + nv_wr32(priv, 0x61c530 + soff, 0x00000000); + nv_wr32(priv, 0x61c534 + soff, 0x00000000); + nv_wr32(priv, 0x61c538 + soff, 0x00000000); + nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001); + + /* Audio InfoFrame */ + nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); + nv_wr32(priv, 0x61c508 + soff, 0x000a0184); + nv_wr32(priv, 0x61c50c + soff, 0x00000071); + nv_wr32(priv, 0x61c510 + soff, 0x00000000); + nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001); + + nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */ + nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ + nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ + + /* ??? */ + nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ + nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ + nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ + + /* HDMI_CTRL */ + nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? 
*/); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c new file mode 100644 index 0000000..5151bb2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c @@ -0,0 +1,62 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include "nv50.h" + +int +nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) +{ + const u32 hoff = (head * 0x800); + + if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { + nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); + nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); + nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); + return 0; + } + + /* AVI InfoFrame */ + nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); + nv_wr32(priv, 0x61671c + hoff, 0x000d0282); + nv_wr32(priv, 0x616720 + hoff, 0x0000006f); + nv_wr32(priv, 0x616724 + hoff, 0x00000000); + nv_wr32(priv, 0x616728 + hoff, 0x00000000); + nv_wr32(priv, 0x61672c + hoff, 0x00000000); + nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001); + + /* ??? InfoFrame? */ + nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); + nv_wr32(priv, 0x6167ac + hoff, 0x00000010); + nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); + + /* HDMI_CTRL */ + nv_mask(priv, 0x616798 + hoff, 0x401f007f, data); + + /* NFI, audio doesn't work without it though.. */ + nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c new file mode 100644 index 0000000..05e903f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c @@ -0,0 +1,105 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <engine/disp.h> + +#include <core/event.h> +#include <core/class.h> + +struct nv04_disp_priv { + struct nouveau_disp base; +}; + +static struct nouveau_oclass +nv04_disp_sclass[] = { + { NV04_DISP_CLASS, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * Display engine implementation + ******************************************************************************/ + +static void +nv04_disp_vblank_enable(struct nouveau_event *event, int head) +{ + nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001); +} + +static void +nv04_disp_vblank_disable(struct nouveau_event *event, int head) +{ + nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000); +} + +static void +nv04_disp_intr(struct nouveau_subdev *subdev) +{ + struct nv04_disp_priv *priv = (void *)subdev; + u32 crtc0 = nv_rd32(priv, 0x600100); + u32 crtc1 = nv_rd32(priv, 0x602100); + + if (crtc0 & 0x00000001) { + nouveau_event_trigger(priv->base.vblank, 0); + nv_wr32(priv, 0x600100, 0x00000001); + } + + if (crtc1 & 0x00000001) { + nouveau_event_trigger(priv->base.vblank, 1); + nv_wr32(priv, 0x602100, 0x00000001); + } +} + +static int +nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_disp_priv *priv; + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY", + "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nv04_disp_sclass; + nv_subdev(priv)->intr = nv04_disp_intr; + priv->base.vblank->priv = priv; + priv->base.vblank->enable = nv04_disp_vblank_enable; + priv->base.vblank->disable = nv04_disp_vblank_disable; + return 0; +} + +struct nouveau_oclass +nv04_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c new file mode 100644 index 0000000..5680d3e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -0,0 +1,1328 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/parent.h> +#include <core/handle.h> +#include <core/class.h> + +#include <engine/disp.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/disp.h> +#include <subdev/bios/init.h> +#include <subdev/bios/pll.h> +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/clock.h> + +#include "nv50.h" + +/******************************************************************************* + * EVO channel base class + ******************************************************************************/ + +int +nv50_disp_chan_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int chid, + int length, void **pobject) +{ + struct nv50_disp_base *base = (void *)parent; + struct nv50_disp_chan *chan; + int ret; + + if (base->chan & (1 << chid)) + return -EBUSY; + base->chan |= (1 << chid); + + ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL, + (1ULL << NVDEV_ENGINE_DMAOBJ), + length, pobject); + chan = *pobject; + if (ret) + return ret; + + chan->chid = chid; + return 0; +} + +void +nv50_disp_chan_destroy(struct nv50_disp_chan *chan) +{ + struct nv50_disp_base *base = (void *)nv_object(chan)->parent; + base->chan &= ~(1 << chan->chid); + nouveau_namedb_destroy(&chan->base); +} + +u32 +nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_chan *chan = (void *)object; + return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr); +} + +void +nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_chan *chan = (void *)object; + nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data); +} + +/******************************************************************************* + * EVO DMA channel base class + ******************************************************************************/ + +static int +nv50_disp_dmac_object_attach(struct nouveau_object *parent, + struct nouveau_object *object, u32 name) +{ + struct nv50_disp_base *base = (void *)parent->parent; + struct nv50_disp_chan *chan = (void *)parent; + u32 addr = nv_gpuobj(object)->node->offset; + u32 chid = chan->chid; + u32 data = (chid << 28) | (addr << 10) | chid; + return nouveau_ramht_insert(base->ramht, chid, name, data); +} + +static void +nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) +{ + struct nv50_disp_base *base = (void *)parent->parent; + nouveau_ramht_remove(base->ramht, cookie); +} + +int +nv50_disp_dmac_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u32 pushbuf, int chid, + int length, void **pobject) +{ + struct nv50_disp_dmac *dmac; + int ret; + + ret = nv50_disp_chan_create_(parent, engine, oclass, chid, + length, pobject); + dmac = *pobject; + if (ret) + return ret; + + dmac->pushdma = (void 
*)nouveau_handle_ref(parent, pushbuf); + if (!dmac->pushdma) + return -ENOENT; + + switch (nv_mclass(dmac->pushdma)) { + case 0x0002: + case 0x003d: + if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff) + return -EINVAL; + + switch (dmac->pushdma->target) { + case NV_MEM_TARGET_VRAM: + dmac->push = 0x00000000 | dmac->pushdma->start >> 8; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + dmac->push = 0x00000003 | dmac->pushdma->start >> 8; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +void +nv50_disp_dmac_dtor(struct nouveau_object *object) +{ + struct nv50_disp_dmac *dmac = (void *)object; + nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma); + nv50_disp_chan_destroy(&dmac->base); +} + +static int +nv50_disp_dmac_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *dmac = (void *)object; + int chid = dmac->base.chid; + int ret; + + ret = nv50_disp_chan_init(&dmac->base); + if (ret) + return ret; + + /* enable error reporting */ + nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid); + + /* initialise channel for dma command submission */ + nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push); + nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000); + nv_wr32(priv, 0x61020c + (chid * 0x0010), chid); + nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); + nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000); + nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013); + + /* wait for it to go inactive */ + if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) { + nv_error(dmac, "init timeout, 0x%08x\n", + nv_rd32(priv, 0x610200 + (chid * 0x10))); + return -EBUSY; + } + + return 0; +} + +static int +nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *dmac = (void *)object; + int chid = dmac->base.chid; + + /* deactivate channel */ + nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); + nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); + if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) { + nv_error(dmac, "fini timeout, 0x%08x\n", + nv_rd32(priv, 0x610200 + (chid * 0x10))); + if (suspend) + return -EBUSY; + } + + /* disable error reporting */ + nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid); + + return nv50_disp_chan_fini(&dmac->base, suspend); +} + +/******************************************************************************* + * EVO master channel object + ******************************************************************************/ + +static int +nv50_disp_mast_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_mast_class *args = data; + struct nv50_disp_dmac *mast; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + 0, sizeof(*mast), (void **)&mast); + *pobject = nv_object(mast); + if (ret) + return ret; + + nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach; + nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach; + return 0; +} + +static int +nv50_disp_mast_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *mast = (void *)object; + int ret; + + 
ret = nv50_disp_chan_init(&mast->base); + if (ret) + return ret; + + /* enable error reporting */ + nv_mask(priv, 0x610028, 0x00010001, 0x00010001); + + /* attempt to unstick channel from some unknown state */ + if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000) + nv_mask(priv, 0x610200, 0x00800000, 0x00800000); + if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000) + nv_mask(priv, 0x610200, 0x00600000, 0x00600000); + + /* initialise channel for dma command submission */ + nv_wr32(priv, 0x610204, mast->push); + nv_wr32(priv, 0x610208, 0x00010000); + nv_wr32(priv, 0x61020c, 0x00000000); + nv_mask(priv, 0x610200, 0x00000010, 0x00000010); + nv_wr32(priv, 0x640000, 0x00000000); + nv_wr32(priv, 0x610200, 0x01000013); + + /* wait for it to go inactive */ + if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) { + nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200)); + return -EBUSY; + } + + return 0; +} + +static int +nv50_disp_mast_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *mast = (void *)object; + + /* deactivate channel */ + nv_mask(priv, 0x610200, 0x00000010, 0x00000000); + nv_mask(priv, 0x610200, 0x00000003, 0x00000000); + if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) { + nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200)); + if (suspend) + return -EBUSY; + } + + /* disable error reporting */ + nv_mask(priv, 0x610028, 0x00010001, 0x00000000); + + return nv50_disp_chan_fini(&mast->base, suspend); +} + +struct nouveau_ofuncs +nv50_disp_mast_ofuncs = { + .ctor = nv50_disp_mast_ctor, + .dtor = nv50_disp_dmac_dtor, + .init = nv50_disp_mast_init, + .fini = nv50_disp_mast_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO sync channel objects + ******************************************************************************/ + +static int +nv50_disp_sync_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_sync_class *args = data; + struct nv50_disp_dmac *dmac; + int ret; + + if (size < sizeof(*args) || args->head > 1) + return -EINVAL; + + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + 1 + args->head, sizeof(*dmac), + (void **)&dmac); + *pobject = nv_object(dmac); + if (ret) + return ret; + + nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; + nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; + return 0; +} + +struct nouveau_ofuncs +nv50_disp_sync_ofuncs = { + .ctor = nv50_disp_sync_ctor, + .dtor = nv50_disp_dmac_dtor, + .init = nv50_disp_dmac_init, + .fini = nv50_disp_dmac_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO overlay channel objects + ******************************************************************************/ + +static int +nv50_disp_ovly_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_ovly_class *args = data; + struct nv50_disp_dmac *dmac; + int ret; + + if (size < sizeof(*args) || args->head > 1) + return -EINVAL; + + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + 3 + args->head, sizeof(*dmac), + (void 
**)&dmac); + *pobject = nv_object(dmac); + if (ret) + return ret; + + nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; + nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; + return 0; +} + +struct nouveau_ofuncs +nv50_disp_ovly_ofuncs = { + .ctor = nv50_disp_ovly_ctor, + .dtor = nv50_disp_dmac_dtor, + .init = nv50_disp_dmac_init, + .fini = nv50_disp_dmac_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO PIO channel base class + ******************************************************************************/ + +static int +nv50_disp_pioc_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int chid, + int length, void **pobject) +{ + return nv50_disp_chan_create_(parent, engine, oclass, chid, + length, pobject); +} + +static void +nv50_disp_pioc_dtor(struct nouveau_object *object) +{ + struct nv50_disp_pioc *pioc = (void *)object; + nv50_disp_chan_destroy(&pioc->base); +} + +static int +nv50_disp_pioc_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_pioc *pioc = (void *)object; + int chid = pioc->base.chid; + int ret; + + ret = nv50_disp_chan_init(&pioc->base); + if (ret) + return ret; + + nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000); + if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) { + nv_error(pioc, "timeout0: 0x%08x\n", + nv_rd32(priv, 0x610200 + (chid * 0x10))); + return -EBUSY; + } + + nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001); + if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) { + nv_error(pioc, "timeout1: 0x%08x\n", + nv_rd32(priv, 0x610200 + (chid * 0x10))); + return -EBUSY; + } + + return 0; +} + +static int +nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_pioc *pioc = (void *)object; + int chid = pioc->base.chid; + + nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000); + if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) { + nv_error(pioc, "timeout: 0x%08x\n", + nv_rd32(priv, 0x610200 + (chid * 0x10))); + if (suspend) + return -EBUSY; + } + + return nv50_disp_chan_fini(&pioc->base, suspend); +} + +/******************************************************************************* + * EVO immediate overlay channel objects + ******************************************************************************/ + +static int +nv50_disp_oimm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_oimm_class *args = data; + struct nv50_disp_pioc *pioc; + int ret; + + if (size < sizeof(*args) || args->head > 1) + return -EINVAL; + + ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head, + sizeof(*pioc), (void **)&pioc); + *pobject = nv_object(pioc); + if (ret) + return ret; + + return 0; +} + +struct nouveau_ofuncs +nv50_disp_oimm_ofuncs = { + .ctor = nv50_disp_oimm_ctor, + .dtor = nv50_disp_pioc_dtor, + .init = nv50_disp_pioc_init, + .fini = nv50_disp_pioc_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO cursor channel objects + ******************************************************************************/ + 
+static int +nv50_disp_curs_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_curs_class *args = data; + struct nv50_disp_pioc *pioc; + int ret; + + if (size < sizeof(*args) || args->head > 1) + return -EINVAL; + + ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head, + sizeof(*pioc), (void **)&pioc); + *pobject = nv_object(pioc); + if (ret) + return ret; + + return 0; +} + +struct nouveau_ofuncs +nv50_disp_curs_ofuncs = { + .ctor = nv50_disp_curs_ctor, + .dtor = nv50_disp_pioc_dtor, + .init = nv50_disp_pioc_init, + .fini = nv50_disp_pioc_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * Base display object + ******************************************************************************/ + +static void +nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) +{ + nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); +} + +static void +nv50_disp_base_vblank_disable(struct nouveau_event *event, int head) +{ + nv_mask(event->priv, 0x61002c, (4 << head), 0); +} + +static int +nv50_disp_base_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv = (void *)engine; + struct nv50_disp_base *base; + int ret; + + ret = nouveau_parent_create(parent, engine, oclass, 0, + priv->sclass, 0, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + priv->base.vblank->priv = priv; + priv->base.vblank->enable = nv50_disp_base_vblank_enable; + priv->base.vblank->disable = nv50_disp_base_vblank_disable; + return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, + &base->ramht); +} + +static void +nv50_disp_base_dtor(struct nouveau_object *object) +{ + struct nv50_disp_base *base = (void *)object; + nouveau_ramht_ref(NULL, &base->ramht); + nouveau_parent_destroy(&base->base); +} + +static int +nv50_disp_base_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_base *base = (void *)object; + int ret, i; + u32 tmp; + + ret = nouveau_parent_init(&base->base); + if (ret) + return ret; + + /* The below segments of code copying values from one register to + * another appear to inform EVO of the display capabilities or + * something similar. NFI what the 0x614004 caps are for.. + */ + tmp = nv_rd32(priv, 0x614004); + nv_wr32(priv, 0x610184, tmp); + + /* ... CRTC caps */ + for (i = 0; i < priv->head.nr; i++) { + tmp = nv_rd32(priv, 0x616100 + (i * 0x800)); + nv_wr32(priv, 0x610190 + (i * 0x10), tmp); + tmp = nv_rd32(priv, 0x616104 + (i * 0x800)); + nv_wr32(priv, 0x610194 + (i * 0x10), tmp); + tmp = nv_rd32(priv, 0x616108 + (i * 0x800)); + nv_wr32(priv, 0x610198 + (i * 0x10), tmp); + tmp = nv_rd32(priv, 0x61610c + (i * 0x800)); + nv_wr32(priv, 0x61019c + (i * 0x10), tmp); + } + + /* ... DAC caps */ + for (i = 0; i < priv->dac.nr; i++) { + tmp = nv_rd32(priv, 0x61a000 + (i * 0x800)); + nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp); + } + + /* ... SOR caps */ + for (i = 0; i < priv->sor.nr; i++) { + tmp = nv_rd32(priv, 0x61c000 + (i * 0x800)); + nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp); + } + + /* ... 
PIOR caps */ + for (i = 0; i < 3; i++) { + tmp = nv_rd32(priv, 0x61e000 + (i * 0x800)); + nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp); + } + + /* steal display away from vbios, or something like that */ + if (nv_rd32(priv, 0x610024) & 0x00000100) { + nv_wr32(priv, 0x610024, 0x00000100); + nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000); + if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) { + nv_error(priv, "timeout acquiring display\n"); + return -EBUSY; + } + } + + /* point at display engine memory area (hash table, objects) */ + nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9); + + /* enable supervisor interrupts, disable everything else */ + nv_wr32(priv, 0x61002c, 0x00000370); + nv_wr32(priv, 0x610028, 0x00000000); + return 0; +} + +static int +nv50_disp_base_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_base *base = (void *)object; + + /* disable all interrupts */ + nv_wr32(priv, 0x610024, 0x00000000); + nv_wr32(priv, 0x610020, 0x00000000); + + return nouveau_parent_fini(&base->base, suspend); +} + +struct nouveau_ofuncs +nv50_disp_base_ofuncs = { + .ctor = nv50_disp_base_ctor, + .dtor = nv50_disp_base_dtor, + .init = nv50_disp_base_init, + .fini = nv50_disp_base_fini, +}; + +static struct nouveau_omthds +nv50_disp_base_omthds[] = { + { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, + { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, + { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, + {}, +}; + +static struct nouveau_oclass +nv50_disp_base_oclass[] = { + { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds }, + {} +}; + +static struct nouveau_oclass +nv50_disp_sclass[] = { + { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, + { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, + { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, + { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, + { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + {} +}; + +/******************************************************************************* + * Display context, tracks instmem allocation and prevents more than one + * client using the display hardware at any time. + ******************************************************************************/ + +static int +nv50_disp_data_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv = (void *)engine; + struct nouveau_engctx *ectx; + int ret = -EBUSY; + + /* no context needed for channel objects... 
*/ + if (nv_mclass(parent) != NV_DEVICE_CLASS) { + atomic_inc(&parent->refcount); + *pobject = parent; + return 1; + } + + /* allocate display hardware to client */ + mutex_lock(&nv_subdev(priv)->mutex); + if (list_empty(&nv_engine(priv)->contexts)) { + ret = nouveau_engctx_create(parent, engine, oclass, NULL, + 0x10000, 0x10000, + NVOBJ_FLAG_HEAP, &ectx); + *pobject = nv_object(ectx); + } + mutex_unlock(&nv_subdev(priv)->mutex); + return ret; +} + +struct nouveau_oclass +nv50_disp_cclass = { + .handle = NV_ENGCTX(DISP, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_disp_data_ctor, + .dtor = _nouveau_engctx_dtor, + .init = _nouveau_engctx_init, + .fini = _nouveau_engctx_fini, + .rd32 = _nouveau_engctx_rd32, + .wr32 = _nouveau_engctx_wr32, + }, +}; + +/******************************************************************************* + * Display engine implementation + ******************************************************************************/ + +static void +nv50_disp_intr_error(struct nv50_disp_priv *priv) +{ + u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16; + u32 addr, data; + int chid; + + for (chid = 0; chid < 5; chid++) { + if (!(channels & (1 << chid))) + continue; + + nv_wr32(priv, 0x610020, 0x00010000 << chid); + addr = nv_rd32(priv, 0x610080 + (chid * 0x08)); + data = nv_rd32(priv, 0x610084 + (chid * 0x08)); + nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000); + + nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n", + chid, addr & 0xffc, data, addr); + } +} + +static u16 +exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, + struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_outp *info) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + u16 mask, type, data; + + if (outp < 4) { + type = DCB_OUTPUT_ANALOG; + mask = 0; + } else + if (outp < 8) { + switch (ctrl & 0x00000f00) { + case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; + case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; + case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break; + case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break; + case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; + case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; + default: + nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); + return 0x0000; + } + outp -= 4; + } else { + outp = outp - 8; + type = 0x0010; + mask = 0; + switch (ctrl & 0x00000f00) { + case 0x00000000: type |= priv->pior.type[outp]; break; + default: + nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl); + return 0x0000; + } + } + + mask = 0x00c0 & (mask << 6); + mask |= 0x0001 << outp; + mask |= 0x0100 << head; + + data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); + if (!data) + return 0x0000; + + /* off-chip encoders require matching the exact encoder type */ + if (dcb->location != 0) + type |= dcb->extdev << 8; + + return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); +} + +static bool +exec_script(struct nv50_disp_priv *priv, int head, int id) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_outp info; + struct dcb_output dcb; + u8 ver, hdr, cnt, len; + u16 data; + u32 ctrl = 0x00000000; + int i; + + /* DAC */ + for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); + + /* SOR */ + if (!(ctrl & (1 << head))) { + if (nv_device(priv)->chipset < 0x90 || + nv_device(priv)->chipset == 0x92 || + nv_device(priv)->chipset == 0xa0) { + for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) + ctrl = nv_rd32(priv, 
0x610b74 + (i * 8)); + i += 4; + } else { + for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) + ctrl = nv_rd32(priv, 0x610798 + (i * 8)); + i += 4; + } + } + + /* PIOR */ + if (!(ctrl & (1 << head))) { + for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + ctrl = nv_rd32(priv, 0x610b84 + (i * 8)); + i += 8; + } + + if (!(ctrl & (1 << head))) + return false; + i--; + + data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); + if (data) { + struct nvbios_init init = { + .subdev = nv_subdev(priv), + .bios = bios, + .offset = info.script[id], + .outp = &dcb, + .crtc = head, + .execute = 1, + }; + + return nvbios_exec(&init) == 0; + } + + return false; +} + +static u32 +exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, + struct dcb_output *outp) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_outp info1; + struct nvbios_ocfg info2; + u8 ver, hdr, cnt, len; + u32 ctrl = 0x00000000; + u32 data, conf = ~0; + int i; + + /* DAC */ + for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); + + /* SOR */ + if (!(ctrl & (1 << head))) { + if (nv_device(priv)->chipset < 0x90 || + nv_device(priv)->chipset == 0x92 || + nv_device(priv)->chipset == 0xa0) { + for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) + ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); + i += 4; + } else { + for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) + ctrl = nv_rd32(priv, 0x610794 + (i * 8)); + i += 4; + } + } + + /* PIOR */ + if (!(ctrl & (1 << head))) { + for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) + ctrl = nv_rd32(priv, 0x610b80 + (i * 8)); + i += 8; + } + + if (!(ctrl & (1 << head))) + return conf; + i--; + + data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); + if (!data) + return conf; + + if (outp->location == 0) { + switch (outp->type) { + case DCB_OUTPUT_TMDS: + conf = (ctrl & 0x00000f00) >> 8; + if (pclk >= 165000) + conf |= 0x0100; + break; + case DCB_OUTPUT_LVDS: + conf = priv->sor.lvdsconf; + break; + case DCB_OUTPUT_DP: + conf = (ctrl & 0x00000f00) >> 8; + break; + case DCB_OUTPUT_ANALOG: + default: + conf = 0x00ff; + break; + } + } else { + conf = (ctrl & 0x00000f00) >> 8; + pclk = pclk / 2; + } + + data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); + if (data && id < 0xff) { + data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); + if (data) { + struct nvbios_init init = { + .subdev = nv_subdev(priv), + .bios = bios, + .offset = data, + .outp = outp, + .crtc = head, + .execute = 1, + }; + + nvbios_exec(&init); + } + } + + return conf; +} + +static void +nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head) +{ + exec_script(priv, head, 1); +} + +static void +nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head) +{ + exec_script(priv, head, 2); +} + +static void +nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head) +{ + struct nouveau_clock *clk = nouveau_clock(priv); + u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; + if (pclk) + clk->pll_set(clk, PLL_VPLL0 + head, pclk); +} + +static void +nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, + struct dcb_output *outp, u32 pclk) +{ + const int link = !(outp->sorconf.link & 1); + const int or = ffs(outp->or) - 1; + const u32 soff = ( or * 0x800); + const u32 loff = (link * 0x080) + soff; + const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8)); + const u32 symbol = 100000; + u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000; + u32 clksor = nv_rd32(priv, 0x614300 + soff); + 
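+	/* What follows appears to be the DP bandwidth calculation: work out
+	 * the packed data rate per lane, express it as a ratio of the link
+	 * symbol rate (scaled by 'symbol' = 100000 to keep some fractional
+	 * precision), then brute-force TU sizes from 64 down to 32 looking
+	 * for the closest integer + fraction encoding the hardware can hold.
+	 * As a rough worked example (not from the source): a 148500kHz pclk
+	 * at 24bpp over 4 lanes with link_bw 270000 gives a link_data_rate
+	 * of 148500*24/8/4 = 111375 per lane and a link_ratio of
+	 * 111375*100000/270000 = 41250, i.e. ~26.4 valid symbols per
+	 * 64-symbol TU, which the loop encodes as VTUi=26 plus a fraction.
+	 */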
int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; + int TU, VTUi, VTUf, VTUa; + u64 link_data_rate, link_ratio, unk; + u32 best_diff = 64 * symbol; + u32 link_nr, link_bw, bits, r; + + /* calculate packed data rate for each lane */ + if (dpctrl > 0x00030000) link_nr = 4; + else if (dpctrl > 0x00010000) link_nr = 2; + else link_nr = 1; + + if (clksor & 0x000c0000) + link_bw = 270000; + else + link_bw = 162000; + + if ((ctrl & 0xf0000) == 0x60000) bits = 30; + else if ((ctrl & 0xf0000) == 0x50000) bits = 24; + else bits = 18; + + link_data_rate = (pclk * bits / 8) / link_nr; + + /* calculate ratio of packed data rate to link symbol rate */ + link_ratio = link_data_rate * symbol; + r = do_div(link_ratio, link_bw); + + for (TU = 64; TU >= 32; TU--) { + /* calculate average number of valid symbols in each TU */ + u32 tu_valid = link_ratio * TU; + u32 calc, diff; + + /* find a hw representation for the fraction.. */ + VTUi = tu_valid / symbol; + calc = VTUi * symbol; + diff = tu_valid - calc; + if (diff) { + if (diff >= (symbol / 2)) { + VTUf = symbol / (symbol - diff); + if (symbol - (VTUf * diff)) + VTUf++; + + if (VTUf <= 15) { + VTUa = 1; + calc += symbol - (symbol / VTUf); + } else { + VTUa = 0; + VTUf = 1; + calc += symbol; + } + } else { + VTUa = 0; + VTUf = min((int)(symbol / diff), 15); + calc += symbol / VTUf; + } + + diff = calc - tu_valid; + } else { + /* no remainder, but the hw doesn't like the fractional + * part to be zero. decrement the integer part and + * have the fraction add a whole symbol back + */ + VTUa = 0; + VTUf = 1; + VTUi--; + } + + if (diff < best_diff) { + best_diff = diff; + bestTU = TU; + bestVTUa = VTUa; + bestVTUf = VTUf; + bestVTUi = VTUi; + if (diff == 0) + break; + } + } + + if (!bestTU) { + nv_error(priv, "unable to find suitable dp config\n"); + return; + } + + /* XXX close to vbios numbers, but not right */ + unk = (symbol - link_ratio) * bestTU; + unk *= link_ratio; + r = do_div(unk, symbol); + r = do_div(unk, symbol); + unk += 6; + + nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); + nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 | + bestVTUf << 16 | + bestVTUi << 8 | unk); +} + +static void +nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) +{ + struct dcb_output outp; + u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; + u32 hval, hreg = 0x614200 + (head * 0x800); + u32 oval, oreg; + u32 mask; + u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp); + if (conf != ~0) { + if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { + u32 soff = (ffs(outp.or) - 1) * 0x08; + u32 ctrl = nv_rd32(priv, 0x610798 + soff); + u32 datarate; + + switch ((ctrl & 0x000f0000) >> 16) { + case 6: datarate = pclk * 30 / 8; break; + case 5: datarate = pclk * 24 / 8; break; + case 2: + default: + datarate = pclk * 18 / 8; + break; + } + + nouveau_dp_train(&priv->base, priv->sor.dp, + &outp, head, datarate); + } + + exec_clkcmp(priv, head, 0, pclk, &outp); + + if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) { + oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800; + oval = 0x00000000; + hval = 0x00000000; + mask = 0xffffffff; + } else + if (!outp.location) { + if (outp.type == DCB_OUTPUT_DP) + nv50_disp_intr_unk20_2_dp(priv, &outp, pclk); + oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800; + oval = (conf & 0x0100) ? 
0x00000101 : 0x00000000; + hval = 0x00000000; + mask = 0x00000707; + } else { + oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800; + oval = 0x00000001; + hval = 0x00000001; + mask = 0x00000707; + } + + nv_mask(priv, hreg, 0x0000000f, hval); + nv_mask(priv, oreg, mask, oval); + } +} + +/* If programming a TMDS output on a SOR that can also be configured for + * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. + * + * It looks like the VBIOS TMDS scripts make an attempt at this, however, + * the VBIOS scripts on at least one board I have only switch it off on + * link 0, causing a blank display if the output has previously been + * programmed for DisplayPort. + */ +static void +nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + const int link = !(outp->sorconf.link & 1); + const int or = ffs(outp->or) - 1; + const u32 loff = (or * 0x800) + (link * 0x80); + const u16 mask = (outp->sorconf.link << 6) | outp->or; + u8 ver, hdr; + + if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) + nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); +} + +static void +nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head) +{ + struct dcb_output outp; + u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff; + if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) { + if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS) + nv50_disp_intr_unk40_0_tmds(priv, &outp); + else + if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) { + u32 soff = (ffs(outp.or) - 1) * 0x08; + u32 ctrl = nv_rd32(priv, 0x610b84 + soff); + u32 datarate; + + switch ((ctrl & 0x000f0000) >> 16) { + case 6: datarate = pclk * 30 / 8; break; + case 5: datarate = pclk * 24 / 8; break; + case 2: + default: + datarate = pclk * 18 / 8; + break; + } + + nouveau_dp_train(&priv->base, priv->pior.dp, + &outp, head, datarate); + } + } +} + +void +nv50_disp_intr_supervisor(struct work_struct *work) +{ + struct nv50_disp_priv *priv = + container_of(work, struct nv50_disp_priv, supervisor); + u32 super = nv_rd32(priv, 0x610030); + int head; + + nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super); + + if (priv->super & 0x00000010) { + for (head = 0; head < priv->head.nr; head++) { + if (!(super & (0x00000020 << head))) + continue; + if (!(super & (0x00000080 << head))) + continue; + nv50_disp_intr_unk10_0(priv, head); + } + } else + if (priv->super & 0x00000020) { + for (head = 0; head < priv->head.nr; head++) { + if (!(super & (0x00000080 << head))) + continue; + nv50_disp_intr_unk20_0(priv, head); + } + for (head = 0; head < priv->head.nr; head++) { + if (!(super & (0x00000200 << head))) + continue; + nv50_disp_intr_unk20_1(priv, head); + } + for (head = 0; head < priv->head.nr; head++) { + if (!(super & (0x00000080 << head))) + continue; + nv50_disp_intr_unk20_2(priv, head); + } + } else + if (priv->super & 0x00000040) { + for (head = 0; head < priv->head.nr; head++) { + if (!(super & (0x00000080 << head))) + continue; + nv50_disp_intr_unk40_0(priv, head); + } + } + + nv_wr32(priv, 0x610030, 0x80000000); +} + +void +nv50_disp_intr(struct nouveau_subdev *subdev) +{ + struct nv50_disp_priv *priv = (void *)subdev; + u32 intr0 = nv_rd32(priv, 0x610020); + u32 intr1 = nv_rd32(priv, 0x610024); + + if (intr0 & 0x001f0000) { + nv50_disp_intr_error(priv); + intr0 &= ~0x001f0000; + } + + if (intr1 & 0x00000004) { + nouveau_event_trigger(priv->base.vblank, 0); + nv_wr32(priv, 0x610024, 0x00000004); + intr1 &= ~0x00000004; + } + 
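+	/* Bit 3 of 0x610024 is the same vblank event for the second head,
+	 * and bits 4-6 select a supervisor stage; those are acknowledged
+	 * here but serviced from process context via the supervisor work
+	 * item below.
+	 */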
+ if (intr1 & 0x00000008) { + nouveau_event_trigger(priv->base.vblank, 1); + nv_wr32(priv, 0x610024, 0x00000008); + intr1 &= ~0x00000008; + } + + if (intr1 & 0x00000070) { + priv->super = (intr1 & 0x00000070); + schedule_work(&priv->supervisor); + nv_wr32(priv, 0x610024, priv->super); + intr1 &= ~0x00000070; + } +} + +static int +nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP", + "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nv50_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nv50_disp_intr; + INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor); + priv->sclass = nv50_disp_sclass; + priv->head.nr = 2; + priv->dac.nr = 3; + priv->sor.nr = 2; + priv->pior.nr = 3; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->pior.power = nv50_pior_power; + priv->pior.dp = &nv50_pior_dp_func; + return 0; +} + +struct nouveau_oclass +nv50_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h new file mode 100644 index 0000000..1ae6ceb --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h @@ -0,0 +1,147 @@ +#ifndef __NV50_DISP_H__ +#define __NV50_DISP_H__ + +#include <core/parent.h> +#include <core/namedb.h> +#include <core/engctx.h> +#include <core/ramht.h> +#include <core/event.h> + +#include <engine/dmaobj.h> +#include <engine/disp.h> + +#include "dport.h" + +struct nv50_disp_priv { + struct nouveau_disp base; + struct nouveau_oclass *sclass; + + struct work_struct supervisor; + u32 super; + + struct { + int nr; + } head; + struct { + int nr; + int (*power)(struct nv50_disp_priv *, int dac, u32 data); + int (*sense)(struct nv50_disp_priv *, int dac, u32 load); + } dac; + struct { + int nr; + int (*power)(struct nv50_disp_priv *, int sor, u32 data); + int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); + int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); + u32 lvdsconf; + const struct nouveau_dp_func *dp; + } sor; + struct { + int nr; + int (*power)(struct nv50_disp_priv *, int ext, u32 data); + u8 type[3]; + const struct nouveau_dp_func *dp; + } pior; +}; + +#define DAC_MTHD(n) (n), (n) + 0x03 + +int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); +int nv50_dac_power(struct nv50_disp_priv *, int, u32); +int nv50_dac_sense(struct nv50_disp_priv *, int, u32); + +#define SOR_MTHD(n) (n), (n) + 0x3f + +int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); +int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); + +int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); +int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); +int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); + +int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32); +int nv50_sor_power(struct nv50_disp_priv *, int, u32); + +int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, + u32, struct dcb_output *); +int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16, + 
u32, struct dcb_output *); +int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32, + struct dcb_output *); +int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, + struct dcb_output *); +int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, + struct dcb_output *); + +int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32, + struct dcb_output *); +int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, + struct dcb_output *); +int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, + struct dcb_output *); + +#define PIOR_MTHD(n) (n), (n) + 0x03 + +int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32); +int nv50_pior_power(struct nv50_disp_priv *, int, u32); + +struct nv50_disp_base { + struct nouveau_parent base; + struct nouveau_ramht *ramht; + u32 chan; +}; + +struct nv50_disp_chan { + struct nouveau_namedb base; + int chid; +}; + +int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, int, void **); +void nv50_disp_chan_destroy(struct nv50_disp_chan *); +u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); +void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); + +#define nv50_disp_chan_init(a) \ + nouveau_namedb_init(&(a)->base) +#define nv50_disp_chan_fini(a,b) \ + nouveau_namedb_fini(&(a)->base, (b)) + +int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32, int, int, void **); +void nv50_disp_dmac_dtor(struct nouveau_object *); + +struct nv50_disp_dmac { + struct nv50_disp_chan base; + struct nouveau_dmaobj *pushdma; + u32 push; +}; + +struct nv50_disp_pioc { + struct nv50_disp_chan base; +}; + +extern struct nouveau_ofuncs nv50_disp_mast_ofuncs; +extern struct nouveau_ofuncs nv50_disp_sync_ofuncs; +extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs; +extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs; +extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; +extern struct nouveau_ofuncs nv50_disp_base_ofuncs; +extern struct nouveau_oclass nv50_disp_cclass; +void nv50_disp_intr_supervisor(struct work_struct *); +void nv50_disp_intr(struct nouveau_subdev *); + +extern struct nouveau_omthds nv84_disp_base_omthds[]; + +extern struct nouveau_omthds nva3_disp_base_omthds[]; + +extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; +extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; +extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; +extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; +extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; +extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; +extern struct nouveau_oclass nvd0_disp_cclass; +void nvd0_disp_intr_supervisor(struct work_struct *); +void nvd0_disp_intr(struct nouveau_subdev *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c new file mode 100644 index 0000000..d8c74c0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c @@ -0,0 +1,102 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <engine/software.h> +#include <engine/disp.h> + +#include <core/class.h> + +#include "nv50.h" + +static struct nouveau_oclass +nv84_disp_sclass[] = { + { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, + { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, + { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, + { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, + { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + {} +}; + +struct nouveau_omthds +nv84_disp_base_omthds[] = { + { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, + { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, + { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, + {}, +}; + +static struct nouveau_oclass +nv84_disp_base_oclass[] = { + { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, + {} +}; + +static int +nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP", + "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nv84_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nv50_disp_intr; + INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor); + priv->sclass = nv84_disp_sclass; + priv->head.nr = 2; + priv->dac.nr = 3; + priv->sor.nr = 2; + priv->pior.nr = 3; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hdmi = nv84_hdmi_ctrl; + priv->pior.power = nv50_pior_power; + priv->pior.dp = &nv50_pior_dp_func; + return 0; +} + +struct nouveau_oclass +nv84_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x82), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c new file mode 100644 index 0000000..a66f949 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c @@ -0,0 +1,103 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <engine/software.h> +#include <engine/disp.h> + +#include <core/class.h> + +#include "nv50.h" + +static struct nouveau_oclass +nv94_disp_sclass[] = { + { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, + { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, + { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, + { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, + { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + {} +}; + +static struct nouveau_omthds +nv94_disp_base_omthds[] = { + { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, + { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, + { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, + {}, +}; + +static struct nouveau_oclass +nv94_disp_base_oclass[] = { + { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds }, + {} +}; + +static int +nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP", + "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nv94_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nv50_disp_intr; + INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor); + priv->sclass = nv94_disp_sclass; + priv->head.nr = 2; + priv->dac.nr = 3; + priv->sor.nr = 4; + priv->pior.nr = 3; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hdmi = nv84_hdmi_ctrl; + priv->sor.dp = &nv94_sor_dp_func; + priv->pior.power = nv50_pior_power; + priv->pior.dp = &nv50_pior_dp_func; + return 0; +} + +struct nouveau_oclass +nv94_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x88), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv94_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git 
a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c new file mode 100644 index 0000000..6cf8eef --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c @@ -0,0 +1,89 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <engine/software.h> +#include <engine/disp.h> + +#include <core/class.h> + +#include "nv50.h" + +static struct nouveau_oclass +nva0_disp_sclass[] = { + { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, + { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, + { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, + { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, + { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + {} +}; + +static struct nouveau_oclass +nva0_disp_base_oclass[] = { + { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, + {} +}; + +static int +nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP", + "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nva0_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nv50_disp_intr; + INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor); + priv->sclass = nva0_disp_sclass; + priv->head.nr = 2; + priv->dac.nr = 3; + priv->sor.nr = 2; + priv->pior.nr = 3; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hdmi = nv84_hdmi_ctrl; + priv->pior.power = nv50_pior_power; + priv->pior.dp = &nv50_pior_dp_func; + return 0; +} + +struct nouveau_oclass +nva0_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x83), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nva0_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c new file mode 100644 index 0000000..b754131 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c @@ -0,0 +1,105 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <engine/software.h> +#include <engine/disp.h> + +#include <core/class.h> + +#include "nv50.h" + +static struct nouveau_oclass +nva3_disp_sclass[] = { + { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, + { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, + { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, + { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, + { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + {} +}; + +struct nouveau_omthds +nva3_disp_base_omthds[] = { + { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, + { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, + { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, + { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, + { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, + { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, + {}, +}; + +static struct nouveau_oclass +nva3_disp_base_oclass[] = { + { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds }, + {} +}; + +static int +nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP", + "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nva3_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nv50_disp_intr; + INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor); + priv->sclass = nva3_disp_sclass; + priv->head.nr = 2; + priv->dac.nr = 3; + priv->sor.nr = 4; + priv->pior.nr = 3; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hda_eld = nva3_hda_eld; + priv->sor.hdmi = nva3_hdmi_ctrl; + priv->sor.dp = &nv94_sor_dp_func; + priv->pior.power = nv50_pior_power; + priv->pior.dp = &nv50_pior_dp_func; + return 0; +} + +struct nouveau_oclass +nva3_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x85), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nva3_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git 
a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c new file mode 100644 index 0000000..019eacd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -0,0 +1,994 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/parent.h> +#include <core/handle.h> +#include <core/class.h> + +#include <engine/disp.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/clock.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/disp.h> +#include <subdev/bios/init.h> +#include <subdev/bios/pll.h> + +#include "nv50.h" + +/******************************************************************************* + * EVO DMA channel base class + ******************************************************************************/ + +static int +nvd0_disp_dmac_object_attach(struct nouveau_object *parent, + struct nouveau_object *object, u32 name) +{ + struct nv50_disp_base *base = (void *)parent->parent; + struct nv50_disp_chan *chan = (void *)parent; + u32 addr = nv_gpuobj(object)->node->offset; + u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001; + return nouveau_ramht_insert(base->ramht, chan->chid, name, data); +} + +static void +nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) +{ + struct nv50_disp_base *base = (void *)parent->parent; + nouveau_ramht_remove(base->ramht, cookie); +} + +static int +nvd0_disp_dmac_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *dmac = (void *)object; + int chid = dmac->base.chid; + int ret; + + ret = nv50_disp_chan_init(&dmac->base); + if (ret) + return ret; + + /* enable error reporting */ + nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid); + nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + + /* initialise channel for dma command submission */ + nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push); + nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000); + nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001); + nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); + nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000); + nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013); + + /* wait for it to go inactive */ + if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) { + 
nv_error(dmac, "init: 0x%08x\n", + nv_rd32(priv, 0x610490 + (chid * 0x10))); + return -EBUSY; + } + + return 0; +} + +static int +nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *dmac = (void *)object; + int chid = dmac->base.chid; + + /* deactivate channel */ + nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); + nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); + if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) { + nv_error(dmac, "fini: 0x%08x\n", + nv_rd32(priv, 0x610490 + (chid * 0x10))); + if (suspend) + return -EBUSY; + } + + /* disable error reporting */ + nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); + nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); + + return nv50_disp_chan_fini(&dmac->base, suspend); +} + +/******************************************************************************* + * EVO master channel object + ******************************************************************************/ + +static int +nvd0_disp_mast_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_mast_class *args = data; + struct nv50_disp_dmac *mast; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + 0, sizeof(*mast), (void **)&mast); + *pobject = nv_object(mast); + if (ret) + return ret; + + nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach; + nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach; + return 0; +} + +static int +nvd0_disp_mast_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *mast = (void *)object; + int ret; + + ret = nv50_disp_chan_init(&mast->base); + if (ret) + return ret; + + /* enable error reporting */ + nv_mask(priv, 0x610090, 0x00000001, 0x00000001); + nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001); + + /* initialise channel for dma command submission */ + nv_wr32(priv, 0x610494, mast->push); + nv_wr32(priv, 0x610498, 0x00010000); + nv_wr32(priv, 0x61049c, 0x00000001); + nv_mask(priv, 0x610490, 0x00000010, 0x00000010); + nv_wr32(priv, 0x640000, 0x00000000); + nv_wr32(priv, 0x610490, 0x01000013); + + /* wait for it to go inactive */ + if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) { + nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490)); + return -EBUSY; + } + + return 0; +} + +static int +nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_dmac *mast = (void *)object; + + /* deactivate channel */ + nv_mask(priv, 0x610490, 0x00000010, 0x00000000); + nv_mask(priv, 0x610490, 0x00000003, 0x00000000); + if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) { + nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490)); + if (suspend) + return -EBUSY; + } + + /* disable error reporting */ + nv_mask(priv, 0x610090, 0x00000001, 0x00000000); + nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000); + + return nv50_disp_chan_fini(&mast->base, suspend); +} + +struct nouveau_ofuncs +nvd0_disp_mast_ofuncs = { + .ctor = nvd0_disp_mast_ctor, + .dtor = nv50_disp_dmac_dtor, + .init = nvd0_disp_mast_init, + .fini = nvd0_disp_mast_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + 
+/******************************************************************************* + * EVO sync channel objects + ******************************************************************************/ + +static int +nvd0_disp_sync_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_sync_class *args = data; + struct nv50_disp_priv *priv = (void *)engine; + struct nv50_disp_dmac *dmac; + int ret; + + if (size < sizeof(*args) || args->head >= priv->head.nr) + return -EINVAL; + + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + 1 + args->head, sizeof(*dmac), + (void **)&dmac); + *pobject = nv_object(dmac); + if (ret) + return ret; + + nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; + nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; + return 0; +} + +struct nouveau_ofuncs +nvd0_disp_sync_ofuncs = { + .ctor = nvd0_disp_sync_ctor, + .dtor = nv50_disp_dmac_dtor, + .init = nvd0_disp_dmac_init, + .fini = nvd0_disp_dmac_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO overlay channel objects + ******************************************************************************/ + +static int +nvd0_disp_ovly_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_ovly_class *args = data; + struct nv50_disp_priv *priv = (void *)engine; + struct nv50_disp_dmac *dmac; + int ret; + + if (size < sizeof(*args) || args->head >= priv->head.nr) + return -EINVAL; + + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + 5 + args->head, sizeof(*dmac), + (void **)&dmac); + *pobject = nv_object(dmac); + if (ret) + return ret; + + nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; + nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; + return 0; +} + +struct nouveau_ofuncs +nvd0_disp_ovly_ofuncs = { + .ctor = nvd0_disp_ovly_ctor, + .dtor = nv50_disp_dmac_dtor, + .init = nvd0_disp_dmac_init, + .fini = nvd0_disp_dmac_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO PIO channel base class + ******************************************************************************/ + +static int +nvd0_disp_pioc_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int chid, + int length, void **pobject) +{ + return nv50_disp_chan_create_(parent, engine, oclass, chid, + length, pobject); +} + +static void +nvd0_disp_pioc_dtor(struct nouveau_object *object) +{ + struct nv50_disp_pioc *pioc = (void *)object; + nv50_disp_chan_destroy(&pioc->base); +} + +static int +nvd0_disp_pioc_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_pioc *pioc = (void *)object; + int chid = pioc->base.chid; + int ret; + + ret = nv50_disp_chan_init(&pioc->base); + if (ret) + return ret; + + /* enable error reporting */ + nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid); + nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + + /* activate channel */ + nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001); + if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 
0x00010000)) { + nv_error(pioc, "init: 0x%08x\n", + nv_rd32(priv, 0x610490 + (chid * 0x10))); + return -EBUSY; + } + + return 0; +} + +static int +nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_pioc *pioc = (void *)object; + int chid = pioc->base.chid; + + nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000); + if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) { + nv_error(pioc, "timeout: 0x%08x\n", + nv_rd32(priv, 0x610490 + (chid * 0x10))); + if (suspend) + return -EBUSY; + } + + /* disable error reporting */ + nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000); + nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000); + + return nv50_disp_chan_fini(&pioc->base, suspend); +} + +/******************************************************************************* + * EVO immediate overlay channel objects + ******************************************************************************/ + +static int +nvd0_disp_oimm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_oimm_class *args = data; + struct nv50_disp_priv *priv = (void *)engine; + struct nv50_disp_pioc *pioc; + int ret; + + if (size < sizeof(*args) || args->head >= priv->head.nr) + return -EINVAL; + + ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head, + sizeof(*pioc), (void **)&pioc); + *pobject = nv_object(pioc); + if (ret) + return ret; + + return 0; +} + +struct nouveau_ofuncs +nvd0_disp_oimm_ofuncs = { + .ctor = nvd0_disp_oimm_ctor, + .dtor = nvd0_disp_pioc_dtor, + .init = nvd0_disp_pioc_init, + .fini = nvd0_disp_pioc_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * EVO cursor channel objects + ******************************************************************************/ + +static int +nvd0_disp_curs_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_display_curs_class *args = data; + struct nv50_disp_priv *priv = (void *)engine; + struct nv50_disp_pioc *pioc; + int ret; + + if (size < sizeof(*args) || args->head >= priv->head.nr) + return -EINVAL; + + ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head, + sizeof(*pioc), (void **)&pioc); + *pobject = nv_object(pioc); + if (ret) + return ret; + + return 0; +} + +struct nouveau_ofuncs +nvd0_disp_curs_ofuncs = { + .ctor = nvd0_disp_curs_ctor, + .dtor = nvd0_disp_pioc_dtor, + .init = nvd0_disp_pioc_init, + .fini = nvd0_disp_pioc_fini, + .rd32 = nv50_disp_chan_rd32, + .wr32 = nv50_disp_chan_wr32, +}; + +/******************************************************************************* + * Base display object + ******************************************************************************/ + +static void +nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head) +{ + nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); +} + +static void +nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head) +{ + nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); +} + +static int +nvd0_disp_base_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 
size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv = (void *)engine; + struct nv50_disp_base *base; + int ret; + + ret = nouveau_parent_create(parent, engine, oclass, 0, + priv->sclass, 0, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + priv->base.vblank->priv = priv; + priv->base.vblank->enable = nvd0_disp_base_vblank_enable; + priv->base.vblank->disable = nvd0_disp_base_vblank_disable; + + return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, + &base->ramht); +} + +static void +nvd0_disp_base_dtor(struct nouveau_object *object) +{ + struct nv50_disp_base *base = (void *)object; + nouveau_ramht_ref(NULL, &base->ramht); + nouveau_parent_destroy(&base->base); +} + +static int +nvd0_disp_base_init(struct nouveau_object *object) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_base *base = (void *)object; + int ret, i; + u32 tmp; + + ret = nouveau_parent_init(&base->base); + if (ret) + return ret; + + /* The below segments of code copying values from one register to + * another appear to inform EVO of the display capabilities or + * something similar. + */ + + /* ... CRTC caps */ + for (i = 0; i < priv->head.nr; i++) { + tmp = nv_rd32(priv, 0x616104 + (i * 0x800)); + nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp); + tmp = nv_rd32(priv, 0x616108 + (i * 0x800)); + nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp); + tmp = nv_rd32(priv, 0x61610c + (i * 0x800)); + nv_wr32(priv, 0x6101bc + (i * 0x800), tmp); + } + + /* ... DAC caps */ + for (i = 0; i < priv->dac.nr; i++) { + tmp = nv_rd32(priv, 0x61a000 + (i * 0x800)); + nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp); + } + + /* ... SOR caps */ + for (i = 0; i < priv->sor.nr; i++) { + tmp = nv_rd32(priv, 0x61c000 + (i * 0x800)); + nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp); + } + + /* steal display away from vbios, or something like that */ + if (nv_rd32(priv, 0x6100ac) & 0x00000100) { + nv_wr32(priv, 0x6100ac, 0x00000100); + nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000); + if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) { + nv_error(priv, "timeout acquiring display\n"); + return -EBUSY; + } + } + + /* point at display engine memory area (hash table, objects) */ + nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9); + + /* enable supervisor interrupts, disable everything else */ + nv_wr32(priv, 0x610090, 0x00000000); + nv_wr32(priv, 0x6100a0, 0x00000000); + nv_wr32(priv, 0x6100b0, 0x00000307); + + return 0; +} + +static int +nvd0_disp_base_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nv50_disp_base *base = (void *)object; + + /* disable all interrupts */ + nv_wr32(priv, 0x6100b0, 0x00000000); + + return nouveau_parent_fini(&base->base, suspend); +} + +struct nouveau_ofuncs +nvd0_disp_base_ofuncs = { + .ctor = nvd0_disp_base_ctor, + .dtor = nvd0_disp_base_dtor, + .init = nvd0_disp_base_init, + .fini = nvd0_disp_base_fini, +}; + +static struct nouveau_oclass +nvd0_disp_base_oclass[] = { + { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, + {} +}; + +static struct nouveau_oclass +nvd0_disp_sclass[] = { + { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, + { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, + { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, + { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, + { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + {} +}; + +/******************************************************************************* + * Display engine 
implementation + ******************************************************************************/ + +static u16 +exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, + struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_outp *info) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + u16 mask, type, data; + + if (outp < 4) { + type = DCB_OUTPUT_ANALOG; + mask = 0; + } else { + outp -= 4; + switch (ctrl & 0x00000f00) { + case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break; + case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break; + case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break; + case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break; + case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break; + case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; + default: + nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); + return 0x0000; + } + dcb->sorconf.link = mask; + } + + mask = 0x00c0 & (mask << 6); + mask |= 0x0001 << outp; + mask |= 0x0100 << head; + + data = dcb_outp_match(bios, type, mask, ver, hdr, dcb); + if (!data) + return 0x0000; + + return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info); +} + +static bool +exec_script(struct nv50_disp_priv *priv, int head, int id) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_outp info; + struct dcb_output dcb; + u8 ver, hdr, cnt, len; + u32 ctrl = 0x00000000; + u16 data; + int outp; + + for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) { + ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20)); + if (ctrl & (1 << head)) + break; + } + + if (outp == 8) + return false; + + data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); + if (data) { + struct nvbios_init init = { + .subdev = nv_subdev(priv), + .bios = bios, + .offset = info.script[id], + .outp = &dcb, + .crtc = head, + .execute = 1, + }; + + return nvbios_exec(&init) == 0; + } + + return false; +} + +static u32 +exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, + u32 pclk, struct dcb_output *dcb) +{ + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_outp info1; + struct nvbios_ocfg info2; + u8 ver, hdr, cnt, len; + u32 ctrl = 0x00000000; + u32 data, conf = ~0; + int outp; + + for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) { + ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20)); + if (ctrl & (1 << head)) + break; + } + + if (outp == 8) + return false; + + data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); + if (data == 0x0000) + return conf; + + switch (dcb->type) { + case DCB_OUTPUT_TMDS: + conf = (ctrl & 0x00000f00) >> 8; + if (pclk >= 165000) + conf |= 0x0100; + break; + case DCB_OUTPUT_LVDS: + conf = priv->sor.lvdsconf; + break; + case DCB_OUTPUT_DP: + conf = (ctrl & 0x00000f00) >> 8; + break; + case DCB_OUTPUT_ANALOG: + default: + conf = 0x00ff; + break; + } + + data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); + if (data && id < 0xff) { + data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); + if (data) { + struct nvbios_init init = { + .subdev = nv_subdev(priv), + .bios = bios, + .offset = data, + .outp = dcb, + .crtc = head, + .execute = 1, + }; + + nvbios_exec(&init); + } + } + + return conf; +} + +static void +nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head) +{ + exec_script(priv, head, 1); +} + +static void +nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head) +{ + exec_script(priv, head, 2); +} + +static void +nvd0_disp_intr_unk2_1(struct nv50_disp_priv 
*priv, int head) +{ + struct nouveau_clock *clk = nouveau_clock(priv); + u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; + if (pclk) + clk->pll_set(clk, PLL_VPLL0 + head, pclk); + nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000); +} + +static void +nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head, + struct dcb_output *outp) +{ + const int or = ffs(outp->or) - 1; + const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020)); + const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300)); + const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; + const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1; + const u32 hoff = (head * 0x800); + const u32 soff = ( or * 0x800); + const u32 loff = (link * 0x080) + soff; + const u32 symbol = 100000; + const u32 TU = 64; + u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000; + u32 clksor = nv_rd32(priv, 0x612300 + soff); + u32 datarate, link_nr, link_bw, bits; + u64 ratio, value; + + if ((conf & 0x3c0) == 0x180) bits = 30; + else if ((conf & 0x3c0) == 0x140) bits = 24; + else bits = 18; + datarate = (pclk * bits) / 8; + + if (dpctrl > 0x00030000) link_nr = 4; + else if (dpctrl > 0x00010000) link_nr = 2; + else link_nr = 1; + + link_bw = (clksor & 0x007c0000) >> 18; + link_bw *= 27000; + + ratio = datarate; + ratio *= symbol; + do_div(ratio, link_nr * link_bw); + + value = (symbol - ratio) * TU; + value *= ratio; + do_div(value, symbol); + do_div(value, symbol); + + value += 5; + value |= 0x08000000; + + nv_wr32(priv, 0x616610 + hoff, value); +} + +static void +nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head) +{ + struct dcb_output outp; + u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; + u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp); + if (conf != ~0) { + u32 addr, data; + + if (outp.type == DCB_OUTPUT_DP) { + u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300)); + switch ((sync & 0x000003c0) >> 6) { + case 6: pclk = pclk * 30 / 8; break; + case 5: pclk = pclk * 24 / 8; break; + case 2: + default: + pclk = pclk * 18 / 8; + break; + } + + nouveau_dp_train(&priv->base, priv->sor.dp, + &outp, head, pclk); + } + + exec_clkcmp(priv, head, 0, pclk, &outp); + + if (outp.type == DCB_OUTPUT_ANALOG) { + addr = 0x612280 + (ffs(outp.or) - 1) * 0x800; + data = 0x00000000; + } else { + if (outp.type == DCB_OUTPUT_DP) + nvd0_disp_intr_unk2_2_tu(priv, head, &outp); + addr = 0x612300 + (ffs(outp.or) - 1) * 0x800; + data = (conf & 0x0100) ? 
0x00000101 : 0x00000000; + } + + nv_mask(priv, addr, 0x00000707, data); + } +} + +static void +nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head) +{ + struct dcb_output outp; + u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; + exec_clkcmp(priv, head, 1, pclk, &outp); +} + +void +nvd0_disp_intr_supervisor(struct work_struct *work) +{ + struct nv50_disp_priv *priv = + container_of(work, struct nv50_disp_priv, supervisor); + u32 mask[4]; + int head; + + nv_debug(priv, "supervisor %08x\n", priv->super); + for (head = 0; head < priv->head.nr; head++) { + mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800)); + nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]); + } + + if (priv->super & 0x00000001) { + for (head = 0; head < priv->head.nr; head++) { + if (!(mask[head] & 0x00001000)) + continue; + nvd0_disp_intr_unk1_0(priv, head); + } + } else + if (priv->super & 0x00000002) { + for (head = 0; head < priv->head.nr; head++) { + if (!(mask[head] & 0x00001000)) + continue; + nvd0_disp_intr_unk2_0(priv, head); + } + for (head = 0; head < priv->head.nr; head++) { + if (!(mask[head] & 0x00010000)) + continue; + nvd0_disp_intr_unk2_1(priv, head); + } + for (head = 0; head < priv->head.nr; head++) { + if (!(mask[head] & 0x00001000)) + continue; + nvd0_disp_intr_unk2_2(priv, head); + } + } else + if (priv->super & 0x00000004) { + for (head = 0; head < priv->head.nr; head++) { + if (!(mask[head] & 0x00001000)) + continue; + nvd0_disp_intr_unk4_0(priv, head); + } + } + + for (head = 0; head < priv->head.nr; head++) + nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000); + nv_wr32(priv, 0x6101d0, 0x80000000); +} + +void +nvd0_disp_intr(struct nouveau_subdev *subdev) +{ + struct nv50_disp_priv *priv = (void *)subdev; + u32 intr = nv_rd32(priv, 0x610088); + int i; + + if (intr & 0x00000001) { + u32 stat = nv_rd32(priv, 0x61008c); + nv_wr32(priv, 0x61008c, stat); + intr &= ~0x00000001; + } + + if (intr & 0x00000002) { + u32 stat = nv_rd32(priv, 0x61009c); + int chid = ffs(stat) - 1; + if (chid >= 0) { + u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12)); + u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12)); + u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12)); + + nv_error(priv, "chid %d mthd 0x%04x data 0x%08x " + "0x%08x 0x%08x\n", + chid, (mthd & 0x0000ffc), data, mthd, unkn); + nv_wr32(priv, 0x61009c, (1 << chid)); + nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000); + } + + intr &= ~0x00000002; + } + + if (intr & 0x00100000) { + u32 stat = nv_rd32(priv, 0x6100ac); + if (stat & 0x00000007) { + priv->super = (stat & 0x00000007); + schedule_work(&priv->supervisor); + nv_wr32(priv, 0x6100ac, priv->super); + stat &= ~0x00000007; + } + + if (stat) { + nv_info(priv, "unknown intr24 0x%08x\n", stat); + nv_wr32(priv, 0x6100ac, stat); + } + + intr &= ~0x00100000; + } + + for (i = 0; i < priv->head.nr; i++) { + u32 mask = 0x01000000 << i; + if (mask & intr) { + u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); + if (stat & 0x00000001) + nouveau_event_trigger(priv->base.vblank, i); + nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0); + nv_rd32(priv, 0x6100c0 + (i * 0x800)); + } + } +} + +static int +nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int heads = nv_rd32(parent, 0x022448); + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, heads, + "PDISP", "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + 
nv_engine(priv)->sclass = nvd0_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nvd0_disp_intr; + INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor); + priv->sclass = nvd0_disp_sclass; + priv->head.nr = heads; + priv->dac.nr = 3; + priv->sor.nr = 4; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hda_eld = nvd0_hda_eld; + priv->sor.hdmi = nvd0_hdmi_ctrl; + priv->sor.dp = &nvd0_sor_dp_func; + return 0; +} + +struct nouveau_oclass +nvd0_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x90), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvd0_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c new file mode 100644 index 0000000..20725b3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c @@ -0,0 +1,89 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <engine/software.h> +#include <engine/disp.h> + +#include <core/class.h> + +#include "nv50.h" + +static struct nouveau_oclass +nve0_disp_sclass[] = { + { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, + { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, + { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, + { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, + { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + {} +}; + +static struct nouveau_oclass +nve0_disp_base_oclass[] = { + { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, + {} +}; + +static int +nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int heads = nv_rd32(parent, 0x022448); + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, heads, + "PDISP", "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nve0_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nvd0_disp_intr; + INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor); + priv->sclass = nve0_disp_sclass; + priv->head.nr = heads; + priv->dac.nr = 3; + priv->sor.nr = 4; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hda_eld = nvd0_hda_eld; + priv->sor.hdmi = nvd0_hdmi_ctrl; + priv->sor.dp = &nvd0_sor_dp_func; + return 0; +} + +struct nouveau_oclass +nve0_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x91), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c new file mode 100644 index 0000000..a488c36 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c @@ -0,0 +1,89 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <engine/software.h> +#include <engine/disp.h> + +#include <core/class.h> + +#include "nv50.h" + +static struct nouveau_oclass +nvf0_disp_sclass[] = { + { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, + { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, + { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, + { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, + { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + {} +}; + +static struct nouveau_oclass +nvf0_disp_base_oclass[] = { + { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, + {} +}; + +static int +nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_disp_priv *priv; + int heads = nv_rd32(parent, 0x022448); + int ret; + + ret = nouveau_disp_create(parent, engine, oclass, heads, + "PDISP", "display", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nvf0_disp_base_oclass; + nv_engine(priv)->cclass = &nv50_disp_cclass; + nv_subdev(priv)->intr = nvd0_disp_intr; + INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor); + priv->sclass = nvf0_disp_sclass; + priv->head.nr = heads; + priv->dac.nr = 3; + priv->sor.nr = 4; + priv->dac.power = nv50_dac_power; + priv->dac.sense = nv50_dac_sense; + priv->sor.power = nv50_sor_power; + priv->sor.hda_eld = nvd0_hda_eld; + priv->sor.hdmi = nvd0_hdmi_ctrl; + priv->sor.dp = &nvd0_sor_dp_func; + return 0; +} + +struct nouveau_oclass +nvf0_disp_oclass = { + .handle = NV_ENGINE(DISP, 0x92), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvf0_disp_ctor, + .dtor = _nouveau_disp_dtor, + .init = _nouveau_disp_init, + .fini = _nouveau_disp_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c new file mode 100644 index 0000000..2c8ce35 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c @@ -0,0 +1,140 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/timer.h> +#include <subdev/i2c.h> + +#include "nv50.h" + +/****************************************************************************** + * DisplayPort + *****************************************************************************/ +static struct nouveau_i2c_port * +nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp) +{ + struct nouveau_i2c *i2c = nouveau_i2c(disp); + return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev)); +} + +static int +nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int pattern) +{ + struct nouveau_i2c_port *port; + int ret = -EINVAL; + + port = nv50_pior_dp_find(disp, outp); + if (port) { + if (port->func->pattern) + ret = port->func->pattern(port, pattern); + else + ret = 0; + } + + return ret; +} + +static int +nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int lane_nr, int link_bw, bool enh) +{ + struct nouveau_i2c_port *port; + int ret = -EINVAL; + + port = nv50_pior_dp_find(disp, outp); + if (port && port->func->lnk_ctl) + ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh); + + return ret; +} + +static int +nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int lane, int vsw, int pre) +{ + struct nouveau_i2c_port *port; + int ret = -EINVAL; + + port = nv50_pior_dp_find(disp, outp); + if (port) { + if (port->func->drv_ctl) + ret = port->func->drv_ctl(port, lane, vsw, pre); + else + ret = 0; + } + + return ret; +} + +const struct nouveau_dp_func +nv50_pior_dp_func = { + .pattern = nv50_pior_dp_pattern, + .lnk_ctl = nv50_pior_dp_lnk_ctl, + .drv_ctl = nv50_pior_dp_drv_ctl, +}; + +/****************************************************************************** + * General PIOR handling + *****************************************************************************/ +int +nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data) +{ + const u32 stat = data & NV50_DISP_PIOR_PWR_STATE; + const u32 soff = (or * 0x800); + nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); + nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat); + nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); + return 0; +} + +int +nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12; + const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR); + u32 *data = args; + int ret; + + if (size < sizeof(u32)) + return -EINVAL; + + mthd &= ~NV50_DISP_PIOR_MTHD_TYPE; + mthd &= ~NV50_DISP_PIOR_MTHD_OR; + switch (mthd) { + case NV50_DISP_PIOR_PWR: + ret = priv->pior.power(priv, or, data[0]); + priv->pior.type[or] = type; + break; + default: + return -EINVAL; + } + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c new file mode 100644 index 0000000..ab1e918 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c @@ -0,0 +1,87 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/timer.h> + +#include "nv50.h" + +int +nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data) +{ + const u32 stat = data & NV50_DISP_SOR_PWR_STATE; + const u32 soff = (or * 0x800); + nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); + nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); + nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); + nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); + return 0; +} + +int +nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) +{ + struct nv50_disp_priv *priv = (void *)object->engine; + struct nouveau_bios *bios = nouveau_bios(priv); + const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; + const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; + const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; + const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); + const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); + struct dcb_output outp; + u8 ver, hdr; + u32 data; + int ret = -EINVAL; + + if (size < sizeof(u32)) + return -EINVAL; + data = *(u32 *)args; + + if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp)) + return -ENODEV; + + switch (mthd & ~0x3f) { + case NV50_DISP_SOR_PWR: + ret = priv->sor.power(priv, or, data); + break; + case NVA3_DISP_SOR_HDA_ELD: + ret = priv->sor.hda_eld(priv, or, args, size); + break; + case NV84_DISP_SOR_HDMI_PWR: + ret = priv->sor.hdmi(priv, head, or, data); + break; + case NV50_DISP_SOR_LVDS_SCRIPT: + priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID; + ret = 0; + break; + default: + BUG_ON(1); + } + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c new file mode 100644 index 0000000..7ec4ee8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c @@ -0,0 +1,127 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/dp.h> +#include <subdev/bios/init.h> + +#include "nv50.h" + +static inline u32 +nv94_sor_soff(struct dcb_output *outp) +{ + return (ffs(outp->or) - 1) * 0x800; +} + +static inline u32 +nv94_sor_loff(struct dcb_output *outp) +{ + return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80; +} + +static inline u32 +nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) +{ + static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ + static const u8 nv94[] = { 16, 8, 0, 24 }; + if (nv_device(priv)->chipset == 0xaf) + return nvaf[lane]; + return nv94[lane]; +} + +static int +nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int pattern) +{ + struct nv50_disp_priv *priv = (void *)disp; + const u32 loff = nv94_sor_loff(outp); + nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24); + return 0; +} + +static int +nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int link_nr, int link_bw, bool enh_frame) +{ + struct nv50_disp_priv *priv = (void *)disp; + const u32 soff = nv94_sor_soff(outp); + const u32 loff = nv94_sor_loff(outp); + u32 dpctrl = 0x00000000; + u32 clksor = 0x00000000; + u32 lane = 0; + int i; + + dpctrl |= ((1 << link_nr) - 1) << 16; + if (enh_frame) + dpctrl |= 0x00004000; + if (link_bw > 0x06) + clksor |= 0x00040000; + + for (i = 0; i < link_nr; i++) + lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3); + + nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor); + nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); + nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); + return 0; +} + +static int +nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int lane, int swing, int preem) +{ + struct nouveau_bios *bios = nouveau_bios(disp); + struct nv50_disp_priv *priv = (void *)disp; + const u32 loff = nv94_sor_loff(outp); + u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); + u8 ver, hdr, cnt, len; + struct nvbios_dpout info; + struct nvbios_dpcfg ocfg; + + addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm, + &ver, &hdr, &cnt, &len, &info); + if (!addr) + return -ENODEV; + + addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, + &ver, &hdr, &cnt, &len, &ocfg); + if (!addr) + return -EINVAL; + + nv_mask(priv, 0x61c118 + loff, 
0x000000ff << shift, ocfg.drv << shift); + nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); + nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); + return 0; +} + +const struct nouveau_dp_func +nv94_sor_dp_func = { + .pattern = nv94_sor_dp_pattern, + .lnk_ctl = nv94_sor_dp_lnk_ctl, + .drv_ctl = nv94_sor_dp_drv_ctl, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c new file mode 100644 index 0000000..9e1d435 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c @@ -0,0 +1,124 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/dp.h> +#include <subdev/bios/init.h> + +#include "nv50.h" + +static inline u32 +nvd0_sor_soff(struct dcb_output *outp) +{ + return (ffs(outp->or) - 1) * 0x800; +} + +static inline u32 +nvd0_sor_loff(struct dcb_output *outp) +{ + return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80; +} + +static inline u32 +nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) +{ + static const u8 nvd0[] = { 16, 8, 0, 24 }; + return nvd0[lane]; +} + +static int +nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int pattern) +{ + struct nv50_disp_priv *priv = (void *)disp; + const u32 loff = nvd0_sor_loff(outp); + nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); + return 0; +} + +static int +nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int link_nr, int link_bw, bool enh_frame) +{ + struct nv50_disp_priv *priv = (void *)disp; + const u32 soff = nvd0_sor_soff(outp); + const u32 loff = nvd0_sor_loff(outp); + u32 dpctrl = 0x00000000; + u32 clksor = 0x00000000; + u32 lane = 0; + int i; + + clksor |= link_bw << 18; + dpctrl |= ((1 << link_nr) - 1) << 16; + if (enh_frame) + dpctrl |= 0x00004000; + + for (i = 0; i < link_nr; i++) + lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3); + + nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor); + nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); + nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); + return 0; +} + +static int +nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp, + int head, int lane, int swing, int preem) +{ + struct nouveau_bios *bios = nouveau_bios(disp); + struct 
nv50_disp_priv *priv = (void *)disp; + const u32 loff = nvd0_sor_loff(outp); + u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane); + u8 ver, hdr, cnt, len; + struct nvbios_dpout info; + struct nvbios_dpcfg ocfg; + + addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm, + &ver, &hdr, &cnt, &len, &info); + if (!addr) + return -ENODEV; + + addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, + &ver, &hdr, &cnt, &len, &ocfg); + if (!addr) + return -EINVAL; + + nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); + nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); + nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); + nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); + return 0; +} + +const struct nouveau_dp_func +nvd0_sor_dp_func = { + .pattern = nvd0_sor_dp_pattern, + .lnk_ctl = nvd0_sor_dp_lnk_ctl, + .drv_ctl = nvd0_sor_dp_drv_ctl, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c new file mode 100644 index 0000000..5a1c684 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c @@ -0,0 +1,215 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/subdev.h> +#include <core/device.h> +#include <subdev/vga.h> + +u8 +nv_rdport(void *obj, int head, u16 port) +{ + struct nouveau_device *device = nv_device(obj); + + if (device->card_type >= NV_50) + return nv_rd08(obj, 0x601000 + port); + + if (port == 0x03c0 || port == 0x03c1 || /* AR */ + port == 0x03c2 || port == 0x03da || /* INP0 */ + port == 0x03d4 || port == 0x03d5) /* CR */ + return nv_rd08(obj, 0x601000 + (head * 0x2000) + port); + + if (port == 0x03c2 || port == 0x03cc || /* MISC */ + port == 0x03c4 || port == 0x03c5 || /* SR */ + port == 0x03ce || port == 0x03cf) { /* GR */ + if (device->card_type < NV_40) + head = 0; /* CR44 selects head */ + return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port); + } + + nv_error(obj, "unknown vga port 0x%04x\n", port); + return 0x00; +} + +void +nv_wrport(void *obj, int head, u16 port, u8 data) +{ + struct nouveau_device *device = nv_device(obj); + + if (device->card_type >= NV_50) + nv_wr08(obj, 0x601000 + port, data); + else + if (port == 0x03c0 || port == 0x03c1 || /* AR */ + port == 0x03c2 || port == 0x03da || /* INP0 */ + port == 0x03d4 || port == 0x03d5) /* CR */ + nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data); + else + if (port == 0x03c2 || port == 0x03cc || /* MISC */ + port == 0x03c4 || port == 0x03c5 || /* SR */ + port == 0x03ce || port == 0x03cf) { /* GR */ + if (device->card_type < NV_40) + head = 0; /* CR44 selects head */ + nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data); + } else + nv_error(obj, "unknown vga port 0x%04x\n", port); +} + +u8 +nv_rdvgas(void *obj, int head, u8 index) +{ + nv_wrport(obj, head, 0x03c4, index); + return nv_rdport(obj, head, 0x03c5); +} + +void +nv_wrvgas(void *obj, int head, u8 index, u8 value) +{ + nv_wrport(obj, head, 0x03c4, index); + nv_wrport(obj, head, 0x03c5, value); +} + +u8 +nv_rdvgag(void *obj, int head, u8 index) +{ + nv_wrport(obj, head, 0x03ce, index); + return nv_rdport(obj, head, 0x03cf); +} + +void +nv_wrvgag(void *obj, int head, u8 index, u8 value) +{ + nv_wrport(obj, head, 0x03ce, index); + nv_wrport(obj, head, 0x03cf, value); +} + +u8 +nv_rdvgac(void *obj, int head, u8 index) +{ + nv_wrport(obj, head, 0x03d4, index); + return nv_rdport(obj, head, 0x03d5); +} + +void +nv_wrvgac(void *obj, int head, u8 index, u8 value) +{ + nv_wrport(obj, head, 0x03d4, index); + nv_wrport(obj, head, 0x03d5, value); +} + +u8 +nv_rdvgai(void *obj, int head, u16 port, u8 index) +{ + if (port == 0x03c4) return nv_rdvgas(obj, head, index); + if (port == 0x03ce) return nv_rdvgag(obj, head, index); + if (port == 0x03d4) return nv_rdvgac(obj, head, index); + nv_error(obj, "unknown indexed vga port 0x%04x\n", port); + return 0x00; +} + +void +nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value) +{ + if (port == 0x03c4) nv_wrvgas(obj, head, index, value); + else if (port == 0x03ce) nv_wrvgag(obj, head, index, value); + else if (port == 0x03d4) nv_wrvgac(obj, head, index, value); + else nv_error(obj, "unknown indexed vga port 0x%04x\n", port); +} + +bool +nv_lockvgac(void *obj, bool lock) +{ + bool locked = !nv_rdvgac(obj, 0, 0x1f); + u8 data = lock ? 
0x99 : 0x57; + nv_wrvgac(obj, 0, 0x1f, data); + if (nv_device(obj)->chipset == 0x11) { + if (!(nv_rd32(obj, 0x001084) & 0x10000000)) + nv_wrvgac(obj, 1, 0x1f, data); + } + return locked; +} + +/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) + * it affects only the 8 bit vga io regs, which we access using mmio at + * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d* + * in general, the set value of cr44 does not matter: reg access works as + * expected and values can be set for the appropriate head by using a 0x2000 + * offset as required + * however: + * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and + * cr44 must be set to 0 or 3 for accessing values on the correct head + * through the common 0xc03c* addresses + * b) in tied mode (4) head B is programmed to the values set on head A, and + * access using the head B addresses can have strange results, ergo we leave + * tied mode in init once we know to what cr44 should be restored on exit + * + * the owner parameter is slightly abused: + * 0 and 1 are treated as head values and so the set value is (owner * 3) + * other values are treated as literal values to set + */ +u8 +nv_rdvgaowner(void *obj) +{ + if (nv_device(obj)->card_type < NV_50) { + if (nv_device(obj)->chipset == 0x11) { + u32 tied = nv_rd32(obj, 0x001084) & 0x10000000; + if (tied == 0) { + u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80; + u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01; + u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80; + u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01; + if (slA && !tvA) return 0x00; + if (slB && !tvB) return 0x03; + if (slA) return 0x00; + if (slB) return 0x03; + return 0x00; + } + return 0x04; + } + + return nv_rdvgac(obj, 0, 0x44); + } + + nv_error(obj, "rdvgaowner after nv4x\n"); + return 0x00; +} + +void +nv_wrvgaowner(void *obj, u8 select) +{ + if (nv_device(obj)->card_type < NV_50) { + u8 owner = (select == 1) ? 3 : select; + if (nv_device(obj)->chipset == 0x11) { + /* workaround hw lockup bug */ + nv_rdvgac(obj, 0, 0x1f); + nv_rdvgac(obj, 1, 0x1f); + } + + nv_wrvgac(obj, 0, 0x44, owner); + + if (nv_device(obj)->chipset == 0x11) { + nv_wrvgac(obj, 0, 0x2e, owner); + nv_wrvgac(obj, 0, 0x2e, owner); + } + } else + nv_error(obj, "wrvgaowner after nv4x\n"); +} diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c new file mode 100644 index 0000000..5103e88 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c @@ -0,0 +1,120 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/class.h> + +#include <subdev/fb.h> +#include <engine/dmaobj.h> + +static int +nouveau_dmaobj_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + struct nouveau_dmaobj *dmaobj; + struct nouveau_gpuobj *gpuobj; + struct nv_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj); + *pobject = nv_object(dmaobj); + if (ret) + return ret; + + switch (args->flags & NV_DMA_TARGET_MASK) { + case NV_DMA_TARGET_VM: + dmaobj->target = NV_MEM_TARGET_VM; + break; + case NV_DMA_TARGET_VRAM: + dmaobj->target = NV_MEM_TARGET_VRAM; + break; + case NV_DMA_TARGET_PCI: + dmaobj->target = NV_MEM_TARGET_PCI; + break; + case NV_DMA_TARGET_PCI_US: + case NV_DMA_TARGET_AGP: + dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; + break; + default: + return -EINVAL; + } + + switch (args->flags & NV_DMA_ACCESS_MASK) { + case NV_DMA_ACCESS_VM: + dmaobj->access = NV_MEM_ACCESS_VM; + break; + case NV_DMA_ACCESS_RD: + dmaobj->access = NV_MEM_ACCESS_RO; + break; + case NV_DMA_ACCESS_WR: + dmaobj->access = NV_MEM_ACCESS_WO; + break; + case NV_DMA_ACCESS_RDWR: + dmaobj->access = NV_MEM_ACCESS_RW; + break; + default: + return -EINVAL; + } + + dmaobj->start = args->start; + dmaobj->limit = args->limit; + dmaobj->conf0 = args->conf0; + + switch (nv_mclass(parent)) { + case NV_DEVICE_CLASS: + /* delayed, or no, binding */ + break; + default: + ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj); + if (ret == 0) { + nouveau_object_ref(NULL, pobject); + *pobject = nv_object(gpuobj); + } + break; + } + + return ret; +} + +static struct nouveau_ofuncs +nouveau_dmaobj_ofuncs = { + .ctor = nouveau_dmaobj_ctor, + .dtor = nouveau_object_destroy, + .init = nouveau_object_init, + .fini = nouveau_object_fini, +}; + +struct nouveau_oclass +nouveau_dmaobj_sclass[] = { + { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, + { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, + { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, + {} +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c new file mode 100644 index 0000000..027d821 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c @@ -0,0 +1,143 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/class.h> + +#include <subdev/fb.h> +#include <subdev/vm/nv04.h> + +#include <engine/dmaobj.h> + +struct nv04_dmaeng_priv { + struct nouveau_dmaeng base; +}; + +static int +nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, + struct nouveau_object *parent, + struct nouveau_dmaobj *dmaobj, + struct nouveau_gpuobj **pgpuobj) +{ + struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng); + struct nouveau_gpuobj *gpuobj; + u32 flags0 = nv_mclass(dmaobj); + u32 flags2 = 0x00000000; + u64 offset = dmaobj->start & 0xfffff000; + u64 adjust = dmaobj->start & 0x00000fff; + u32 length = dmaobj->limit - dmaobj->start; + int ret; + + if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { + switch (nv_mclass(parent->parent)) { + case NV03_CHANNEL_DMA_CLASS: + case NV10_CHANNEL_DMA_CLASS: + case NV17_CHANNEL_DMA_CLASS: + case NV40_CHANNEL_DMA_CLASS: + break; + default: + return -EINVAL; + } + } + + if (dmaobj->target == NV_MEM_TARGET_VM) { + if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { + struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; + if (!dmaobj->start) + return nouveau_gpuobj_dup(parent, pgt, pgpuobj); + offset = nv_ro32(pgt, 8 + (offset >> 10)); + offset &= 0xfffff000; + } + + dmaobj->target = NV_MEM_TARGET_PCI; + dmaobj->access = NV_MEM_ACCESS_RW; + } + + switch (dmaobj->target) { + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00003000; + break; + case NV_MEM_TARGET_PCI: + flags0 |= 0x00023000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00033000; + break; + default: + return -EINVAL; + } + + switch (dmaobj->access) { + case NV_MEM_ACCESS_RO: + flags0 |= 0x00004000; + break; + case NV_MEM_ACCESS_WO: + flags0 |= 0x00008000; + case NV_MEM_ACCESS_RW: + flags2 |= 0x00000002; + break; + default: + return -EINVAL; + } + + ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); + *pgpuobj = gpuobj; + if (ret == 0) { + nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20)); + nv_wo32(*pgpuobj, 0x04, length); + nv_wo32(*pgpuobj, 0x08, flags2 | offset); + nv_wo32(*pgpuobj, 0x0c, flags2 | offset); + } + + return ret; +} + +static int +nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_dmaeng_priv *priv; + int ret; + + ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.bind = nv04_dmaobj_bind; + return 0; +} + +struct nouveau_oclass +nv04_dmaeng_oclass = { + .handle = NV_ENGINE(DMAOBJ, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_dmaeng_ctor, + .dtor = _nouveau_dmaeng_dtor, + .init = _nouveau_dmaeng_init, + .fini = _nouveau_dmaeng_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c new file mode 100644 index 0000000..750183f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c @@ 
-0,0 +1,161 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/class.h> + +#include <subdev/fb.h> +#include <engine/dmaobj.h> + +struct nv50_dmaeng_priv { + struct nouveau_dmaeng base; +}; + +static int +nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, + struct nouveau_object *parent, + struct nouveau_dmaobj *dmaobj, + struct nouveau_gpuobj **pgpuobj) +{ + u32 flags0 = nv_mclass(dmaobj); + u32 flags5 = 0x00000000; + int ret; + + if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { + switch (nv_mclass(parent->parent)) { + case NV50_CHANNEL_DMA_CLASS: + case NV84_CHANNEL_DMA_CLASS: + case NV50_CHANNEL_IND_CLASS: + case NV84_CHANNEL_IND_CLASS: + case NV50_DISP_MAST_CLASS: + case NV84_DISP_MAST_CLASS: + case NV94_DISP_MAST_CLASS: + case NVA0_DISP_MAST_CLASS: + case NVA3_DISP_MAST_CLASS: + case NV50_DISP_SYNC_CLASS: + case NV84_DISP_SYNC_CLASS: + case NV94_DISP_SYNC_CLASS: + case NVA0_DISP_SYNC_CLASS: + case NVA3_DISP_SYNC_CLASS: + case NV50_DISP_OVLY_CLASS: + case NV84_DISP_OVLY_CLASS: + case NV94_DISP_OVLY_CLASS: + case NVA0_DISP_OVLY_CLASS: + case NVA3_DISP_OVLY_CLASS: + break; + default: + return -EINVAL; + } + } + + if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { + if (dmaobj->target == NV_MEM_TARGET_VM) { + dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; + dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; + dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM; + dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; + } else { + dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; + dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; + dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; + dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; + } + } + + flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; + flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; + flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); + flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); + + switch (dmaobj->target) { + case NV_MEM_TARGET_VM: + flags0 |= 0x00000000; + break; + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00010000; + break; + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; + break; + default: + return -EINVAL; + } + + switch (dmaobj->access) { + case NV_MEM_ACCESS_VM: + break; + case NV_MEM_ACCESS_RO: + flags0 |= 0x00040000; + break; + case NV_MEM_ACCESS_WO: + case NV_MEM_ACCESS_RW: + flags0 |= 0x00080000; + break; + } + + ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); + if (ret 
== 0) { + nv_wo32(*pgpuobj, 0x00, flags0); + nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); + nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); + nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | + upper_32_bits(dmaobj->start)); + nv_wo32(*pgpuobj, 0x10, 0x00000000); + nv_wo32(*pgpuobj, 0x14, flags5); + } + + return ret; +} + +static int +nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_dmaeng_priv *priv; + int ret; + + ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.bind = nv50_dmaobj_bind; + return 0; +} + +struct nouveau_oclass +nv50_dmaeng_oclass = { + .handle = NV_ENGINE(DMAOBJ, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_dmaeng_ctor, + .dtor = _nouveau_dmaeng_dtor, + .init = _nouveau_dmaeng_init, + .fini = _nouveau_dmaeng_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c new file mode 100644 index 0000000..cd3970d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c @@ -0,0 +1,143 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/gpuobj.h> +#include <core/class.h> + +#include <subdev/fb.h> +#include <engine/dmaobj.h> + +struct nvc0_dmaeng_priv { + struct nouveau_dmaeng base; +}; + +static int +nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, + struct nouveau_object *parent, + struct nouveau_dmaobj *dmaobj, + struct nouveau_gpuobj **pgpuobj) +{ + u32 flags0 = nv_mclass(dmaobj); + u32 flags5 = 0x00000000; + int ret; + + if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { + switch (nv_mclass(parent->parent)) { + case NVA3_DISP_MAST_CLASS: + case NVA3_DISP_SYNC_CLASS: + case NVA3_DISP_OVLY_CLASS: + break; + default: + return -EINVAL; + } + } else + return 0; + + if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { + if (dmaobj->target == NV_MEM_TARGET_VM) { + dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; + dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; + } else { + dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; + dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; + dmaobj->conf0 |= 0x00020000; + } + } + + flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; + flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); + flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); + + switch (dmaobj->target) { + case NV_MEM_TARGET_VM: + flags0 |= 0x00000000; + break; + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00010000; + break; + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; + break; + default: + return -EINVAL; + } + + switch (dmaobj->access) { + case NV_MEM_ACCESS_VM: + break; + case NV_MEM_ACCESS_RO: + flags0 |= 0x00040000; + break; + case NV_MEM_ACCESS_WO: + case NV_MEM_ACCESS_RW: + flags0 |= 0x00080000; + break; + } + + ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); + if (ret == 0) { + nv_wo32(*pgpuobj, 0x00, flags0); + nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); + nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); + nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | + upper_32_bits(dmaobj->start)); + nv_wo32(*pgpuobj, 0x10, 0x00000000); + nv_wo32(*pgpuobj, 0x14, flags5); + } + + return ret; +} + +static int +nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_dmaeng_priv *priv; + int ret; + + ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.bind = nvc0_dmaobj_bind; + return 0; +} + +struct nouveau_oclass +nvc0_dmaeng_oclass = { + .handle = NV_ENGINE(DMAOBJ, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_dmaeng_ctor, + .dtor = _nouveau_dmaeng_dtor, + .init = _nouveau_dmaeng_init, + .fini = _nouveau_dmaeng_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c new file mode 100644 index 0000000..944e73a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c @@ -0,0 +1,125 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/gpuobj.h> +#include <core/class.h> + +#include <subdev/fb.h> +#include <engine/dmaobj.h> + +struct nvd0_dmaeng_priv { + struct nouveau_dmaeng base; +}; + +static int +nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, + struct nouveau_object *parent, + struct nouveau_dmaobj *dmaobj, + struct nouveau_gpuobj **pgpuobj) +{ + u32 flags0 = 0x00000000; + int ret; + + if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { + switch (nv_mclass(parent->parent)) { + case NVD0_DISP_MAST_CLASS: + case NVD0_DISP_SYNC_CLASS: + case NVD0_DISP_OVLY_CLASS: + case NVE0_DISP_MAST_CLASS: + case NVE0_DISP_SYNC_CLASS: + case NVE0_DISP_OVLY_CLASS: + case NVF0_DISP_MAST_CLASS: + case NVF0_DISP_SYNC_CLASS: + case NVF0_DISP_OVLY_CLASS: + break; + default: + return -EINVAL; + } + } else + return 0; + + if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) { + if (dmaobj->target == NV_MEM_TARGET_VM) { + dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM; + dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP; + } else { + dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR; + dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP; + } + } + + flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20; + flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4; + + switch (dmaobj->target) { + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00000009; + break; + default: + return -EINVAL; + break; + } + + ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); + if (ret == 0) { + nv_wo32(*pgpuobj, 0x00, flags0); + nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8); + nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8); + nv_wo32(*pgpuobj, 0x0c, 0x00000000); + nv_wo32(*pgpuobj, 0x10, 0x00000000); + nv_wo32(*pgpuobj, 0x14, 0x00000000); + } + + return ret; +} + +static int +nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvd0_dmaeng_priv *priv; + int ret; + + ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->sclass = nouveau_dmaobj_sclass; + priv->base.bind = nvd0_dmaobj_bind; + return 0; +} + +struct nouveau_oclass +nvd0_dmaeng_oclass = { + .handle = NV_ENGINE(DMAOBJ, 0xd0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvd0_dmaeng_ctor, + .dtor = _nouveau_dmaeng_dtor, + .init = _nouveau_dmaeng_init, + .fini = _nouveau_dmaeng_fini, + }, +}; diff 
--git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c new file mode 100644 index 0000000..d3ec436 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c @@ -0,0 +1,208 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/object.h> +#include <core/handle.h> +#include <core/event.h> +#include <core/class.h> + +#include <engine/dmaobj.h> +#include <engine/fifo.h> + +int +nouveau_fifo_channel_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + int bar, u32 addr, u32 size, u32 pushbuf, + u64 engmask, int len, void **ptr) +{ + struct nouveau_device *device = nv_device(engine); + struct nouveau_fifo *priv = (void *)engine; + struct nouveau_fifo_chan *chan; + struct nouveau_dmaeng *dmaeng; + unsigned long flags; + int ret; + + /* create base object class */ + ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL, + engmask, len, ptr); + chan = *ptr; + if (ret) + return ret; + + /* validate dma object representing push buffer */ + chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf); + if (!chan->pushdma) + return -ENOENT; + + dmaeng = (void *)chan->pushdma->base.engine; + switch (chan->pushdma->base.oclass->handle) { + case NV_DMA_FROM_MEMORY_CLASS: + case NV_DMA_IN_MEMORY_CLASS: + break; + default: + return -EINVAL; + } + + ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); + if (ret) + return ret; + + /* find a free fifo channel */ + spin_lock_irqsave(&priv->lock, flags); + for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) { + if (!priv->channel[chan->chid]) { + priv->channel[chan->chid] = nv_object(chan); + break; + } + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (chan->chid == priv->max) { + nv_error(priv, "no free channels\n"); + return -ENOSPC; + } + + /* map fifo control registers */ + chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr + + (chan->chid * size), size); + if (!chan->user) + return -EFAULT; + + nouveau_event_trigger(priv->cevent, 0); + + chan->size = size; + return 0; +} + +void +nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan) +{ + struct nouveau_fifo *priv = (void *)nv_object(chan)->engine; + unsigned long flags; + + iounmap(chan->user); + + spin_lock_irqsave(&priv->lock, flags); + priv->channel[chan->chid] = NULL; + 
spin_unlock_irqrestore(&priv->lock, flags); + + nouveau_gpuobj_ref(NULL, &chan->pushgpu); + nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma); + nouveau_namedb_destroy(&chan->base); +} + +void +_nouveau_fifo_channel_dtor(struct nouveau_object *object) +{ + struct nouveau_fifo_chan *chan = (void *)object; + nouveau_fifo_channel_destroy(chan); +} + +u32 +_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) +{ + struct nouveau_fifo_chan *chan = (void *)object; + return ioread32_native(chan->user + addr); +} + +void +_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nouveau_fifo_chan *chan = (void *)object; + iowrite32_native(data, chan->user + addr); +} + +static int +nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object) +{ + int engidx = nv_hclass(priv) & 0xff; + + while (object && object->parent) { + if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) && + (nv_hclass(object->parent) & 0xff) == engidx) + return nouveau_fifo_chan(object)->chid; + object = object->parent; + } + + return -1; +} + +const char * +nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid) +{ + struct nouveau_fifo_chan *chan = NULL; + unsigned long flags; + + spin_lock_irqsave(&fifo->lock, flags); + if (chid >= fifo->min && chid <= fifo->max) + chan = (void *)fifo->channel[chid]; + spin_unlock_irqrestore(&fifo->lock, flags); + + return nouveau_client_name(chan); +} + +void +nouveau_fifo_destroy(struct nouveau_fifo *priv) +{ + kfree(priv->channel); + nouveau_event_destroy(&priv->uevent); + nouveau_event_destroy(&priv->cevent); + nouveau_engine_destroy(&priv->base); +} + +int +nouveau_fifo_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + int min, int max, int length, void **pobject) +{ + struct nouveau_fifo *priv; + int ret; + + ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO", + "fifo", length, pobject); + priv = *pobject; + if (ret) + return ret; + + priv->min = min; + priv->max = max; + priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL); + if (!priv->channel) + return -ENOMEM; + + ret = nouveau_event_create(1, &priv->cevent); + if (ret) + return ret; + + ret = nouveau_event_create(1, &priv->uevent); + if (ret) + return ret; + + priv->chid = nouveau_fifo_chid; + spin_lock_init(&priv->lock); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c new file mode 100644 index 0000000..f877bd5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c @@ -0,0 +1,644 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/namedb.h> +#include <core/handle.h> +#include <core/ramht.h> +#include <core/event.h> + +#include <subdev/instmem.h> +#include <subdev/instmem/nv04.h> +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/fifo.h> + +#include "nv04.h" + +static struct ramfc_desc +nv04_ramfc[] = { + { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, + { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, + { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, + { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, + { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE }, + { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, + { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE }, + { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 }, + {} +}; + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +int +nv04_fifo_object_attach(struct nouveau_object *parent, + struct nouveau_object *object, u32 handle) +{ + struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo_chan *chan = (void *)parent; + u32 context, chid = chan->base.chid; + int ret; + + if (nv_iclass(object, NV_GPUOBJ_CLASS)) + context = nv_gpuobj(object)->addr >> 4; + else + context = 0x00000004; /* just non-zero */ + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_DMAOBJ: + case NVDEV_ENGINE_SW: + context |= 0x00000000; + break; + case NVDEV_ENGINE_GR: + context |= 0x00010000; + break; + case NVDEV_ENGINE_MPEG: + context |= 0x00020000; + break; + default: + return -EINVAL; + } + + context |= 0x80000000; /* valid */ + context |= chid << 24; + + mutex_lock(&nv_subdev(priv)->mutex); + ret = nouveau_ramht_insert(priv->ramht, chid, handle, context); + mutex_unlock(&nv_subdev(priv)->mutex); + return ret; +} + +void +nv04_fifo_object_detach(struct nouveau_object *parent, int cookie) +{ + struct nv04_fifo_priv *priv = (void *)parent->engine; + mutex_lock(&nv_subdev(priv)->mutex); + nouveau_ramht_remove(priv->ramht, cookie); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +int +nv04_fifo_context_attach(struct nouveau_object *parent, + struct nouveau_object *object) +{ + nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid; + return 0; +} + +static int +nv04_fifo_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo_chan *chan; + struct nv03_channel_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, + 0x10000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->object_attach = nv04_fifo_object_attach; + nv_parent(chan)->object_detach = nv04_fifo_object_detach; + nv_parent(chan)->context_attach = nv04_fifo_context_attach; + chan->ramfc = chan->base.chid * 32; + + nv_wo32(priv->ramfc, chan->ramfc + 0x00, 
args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); + nv_wo32(priv->ramfc, chan->ramfc + 0x10, + NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); + return 0; +} + +void +nv04_fifo_chan_dtor(struct nouveau_object *object) +{ + struct nv04_fifo_priv *priv = (void *)object->engine; + struct nv04_fifo_chan *chan = (void *)object; + struct ramfc_desc *c = priv->ramfc_desc; + + do { + nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000); + } while ((++c)->bits); + + nouveau_fifo_channel_destroy(&chan->base); +} + +int +nv04_fifo_chan_init(struct nouveau_object *object) +{ + struct nv04_fifo_priv *priv = (void *)object->engine; + struct nv04_fifo_chan *chan = (void *)object; + u32 mask = 1 << chan->base.chid; + unsigned long flags; + int ret; + + ret = nouveau_fifo_channel_init(&chan->base); + if (ret) + return ret; + + spin_lock_irqsave(&priv->base.lock, flags); + nv_mask(priv, NV04_PFIFO_MODE, mask, mask); + spin_unlock_irqrestore(&priv->base.lock, flags); + return 0; +} + +int +nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend) +{ + struct nv04_fifo_priv *priv = (void *)object->engine; + struct nv04_fifo_chan *chan = (void *)object; + struct nouveau_gpuobj *fctx = priv->ramfc; + struct ramfc_desc *c; + unsigned long flags; + u32 data = chan->ramfc; + u32 chid; + + /* prevent fifo context switches */ + spin_lock_irqsave(&priv->base.lock, flags); + nv_wr32(priv, NV03_PFIFO_CACHES, 0); + + /* if this channel is active, replace it with a null context */ + chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; + if (chid == chan->base.chid) { + nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0); + nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); + + c = priv->ramfc_desc; + do { + u32 rm = ((1ULL << c->bits) - 1) << c->regs; + u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; + u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs; + u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm); + nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); + } while ((++c)->bits); + + c = priv->ramfc_desc; + do { + nv_wr32(priv, c->regp, 0x00000000); + } while ((++c)->bits); + + nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0); + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + } + + /* restore normal operation, after disabling dma mode */ + nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0); + nv_wr32(priv, NV03_PFIFO_CACHES, 1); + spin_unlock_irqrestore(&priv->base.lock, flags); + + return nouveau_fifo_channel_fini(&chan->base, suspend); +} + +static struct nouveau_ofuncs +nv04_fifo_ofuncs = { + .ctor = nv04_fifo_chan_ctor, + .dtor = nv04_fifo_chan_dtor, + .init = nv04_fifo_chan_init, + .fini = nv04_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nv04_fifo_sclass[] = { + { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs }, + {} +}; + +/******************************************************************************* + * FIFO context - basically just the instmem reserved for the channel + ******************************************************************************/ + +int +nv04_fifo_context_ctor(struct 
nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_fifo_base *base; + int ret; + + ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000, + 0x1000, NVOBJ_FLAG_HEAP, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + return 0; +} + +static struct nouveau_oclass +nv04_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_fifo_context_ctor, + .dtor = _nouveau_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +void +nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags) +__acquires(priv->base.lock) +{ + struct nv04_fifo_priv *priv = (void *)pfifo; + unsigned long flags; + + spin_lock_irqsave(&priv->base.lock, flags); + *pflags = flags; + + nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000); + nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000); + + /* in some cases the puller may be left in an inconsistent state + * if you try to stop it while it's busy translating handles. + * sometimes you get a CACHE_ERROR, sometimes it just fails + * silently; sending incorrect instance offsets to PGRAPH after + * it's started up again. + * + * to avoid this, we invalidate the most recently calculated + * instance. + */ + if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0, + NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000)) + nv_warn(priv, "timeout idling puller\n"); + + if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) & + NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) + nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); + + nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000); +} + +void +nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags) +__releases(priv->base.lock) +{ + struct nv04_fifo_priv *priv = (void *)pfifo; + unsigned long flags = *pflags; + + nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); + nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001); + + spin_unlock_irqrestore(&priv->base.lock, flags); +} + +static const char * +nv_dma_state_err(u32 state) +{ + static const char * const desc[] = { + "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE", + "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK" + }; + return desc[(state >> 29) & 0x7]; +} + +static bool +nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data) +{ + struct nv04_fifo_chan *chan = NULL; + struct nouveau_handle *bind; + const int subc = (addr >> 13) & 0x7; + const int mthd = addr & 0x1ffc; + bool handled = false; + unsigned long flags; + u32 engine; + + spin_lock_irqsave(&priv->base.lock, flags); + if (likely(chid >= priv->base.min && chid <= priv->base.max)) + chan = (void *)priv->base.channel[chid]; + if (unlikely(!chan)) + goto out; + + switch (mthd) { + case 0x0000: + bind = nouveau_namedb_get(nv_namedb(chan), data); + if (unlikely(!bind)) + break; + + if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) { + engine = 0x0000000f << (subc * 4); + chan->subc[subc] = data; + handled = true; + + nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0); + } + + nouveau_namedb_put(bind); + break; + default: + engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE); + if 
(unlikely(((engine >> (subc * 4)) & 0xf) != 0)) + break; + + bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]); + if (likely(bind)) { + if (!nv_call(bind->object, mthd, data)) + handled = true; + nouveau_namedb_put(bind); + } + break; + } + +out: + spin_unlock_irqrestore(&priv->base.lock, flags); + return handled; +} + +static void +nv04_fifo_cache_error(struct nouveau_device *device, + struct nv04_fifo_priv *priv, u32 chid, u32 get) +{ + u32 mthd, data; + int ptr; + + /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my + * G80 chips, but CACHE1 isn't big enough for this much data.. Tests + * show that it wraps around to the start at GET=0x800.. No clue as to + * why.. + */ + ptr = (get & 0x7ff) >> 2; + + if (device->card_type < NV_40) { + mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr)); + } else { + mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr)); + } + + if (!nv04_fifo_swmthd(priv, chid, mthd, data)) { + const char *client_name = + nouveau_client_name_for_fifo_chid(&priv->base, chid); + nv_error(priv, + "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", + chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc, + data); + } + + nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0); + nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1); + nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1); + nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0); + + nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, + nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); +} + +static void +nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv, + u32 chid) +{ + const char *client_name; + u32 dma_get = nv_rd32(priv, 0x003244); + u32 dma_put = nv_rd32(priv, 0x003240); + u32 push = nv_rd32(priv, 0x003220); + u32 state = nv_rd32(priv, 0x003228); + + client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid); + + if (device->card_type == NV_50) { + u32 ho_get = nv_rd32(priv, 0x003328); + u32 ho_put = nv_rd32(priv, 0x003320); + u32 ib_get = nv_rd32(priv, 0x003334); + u32 ib_put = nv_rd32(priv, 0x003330); + + nv_error(priv, + "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n", + chid, client_name, ho_get, dma_get, ho_put, dma_put, + ib_get, ib_put, state, nv_dma_state_err(state), push); + + /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ + nv_wr32(priv, 0x003364, 0x00000000); + if (dma_get != dma_put || ho_get != ho_put) { + nv_wr32(priv, 0x003244, dma_put); + nv_wr32(priv, 0x003328, ho_put); + } else + if (ib_get != ib_put) + nv_wr32(priv, 0x003334, ib_put); + } else { + nv_error(priv, + "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n", + chid, client_name, dma_get, dma_put, state, + nv_dma_state_err(state), push); + + if (dma_get != dma_put) + nv_wr32(priv, 0x003244, dma_put); + } + + nv_wr32(priv, 0x003228, 0x00000000); + nv_wr32(priv, 0x003220, 0x00000001); + nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); +} + +void +nv04_fifo_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_device *device = nv_device(subdev); + struct nv04_fifo_priv *priv = (void *)subdev; + uint32_t status, reassign; + int cnt = 0; + + reassign = 
nv_rd32(priv, NV03_PFIFO_CACHES) & 1; + while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { + uint32_t chid, get; + + nv_wr32(priv, NV03_PFIFO_CACHES, 0); + + chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; + get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); + + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + nv04_fifo_cache_error(device, priv, chid, get); + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + } + + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + nv04_fifo_dma_pusher(device, priv, chid); + status &= ~NV_PFIFO_INTR_DMA_PUSHER; + } + + if (status & NV_PFIFO_INTR_SEMAPHORE) { + uint32_t sem; + + status &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(priv, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_SEMAPHORE); + + sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + + nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + } + + if (device->card_type == NV_50) { + if (status & 0x00000010) { + status &= ~0x00000010; + nv_wr32(priv, 0x002100, 0x00000010); + } + + if (status & 0x40000000) { + nouveau_event_trigger(priv->base.uevent, 0); + nv_wr32(priv, 0x002100, 0x40000000); + status &= ~0x40000000; + } + } + + if (status) { + nv_warn(priv, "unknown intr 0x%08x, ch %d\n", + status, chid); + nv_wr32(priv, NV03_PFIFO_INTR_0, status); + status = 0; + } + + nv_wr32(priv, NV03_PFIFO_CACHES, reassign); + } + + if (status) { + nv_error(priv, "still angry after %d spins, halt\n", cnt); + nv_wr32(priv, 0x002140, 0); + nv_wr32(priv, 0x000140, 0); + } + + nv_wr32(priv, 0x000100, 0x00000100); +} + +static int +nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_instmem_priv *imem = nv04_instmem(parent); + struct nv04_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nouveau_ramht_ref(imem->ramht, &priv->ramht); + nouveau_gpuobj_ref(imem->ramro, &priv->ramro); + nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nv04_fifo_intr; + nv_engine(priv)->cclass = &nv04_fifo_cclass; + nv_engine(priv)->sclass = nv04_fifo_sclass; + priv->base.pause = nv04_fifo_pause; + priv->base.start = nv04_fifo_start; + priv->ramfc_desc = nv04_ramfc; + return 0; +} + +void +nv04_fifo_dtor(struct nouveau_object *object) +{ + struct nv04_fifo_priv *priv = (void *)object; + nouveau_gpuobj_ref(NULL, &priv->ramfc); + nouveau_gpuobj_ref(NULL, &priv->ramro); + nouveau_ramht_ref(NULL, &priv->ramht); + nouveau_fifo_destroy(&priv->base); +} + +int +nv04_fifo_init(struct nouveau_object *object) +{ + struct nv04_fifo_priv *priv = (void *)object; + int ret; + + ret = nouveau_fifo_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff); + nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); + + nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((priv->ramht->bits - 9) << 16) | + (priv->ramht->base.addr >> 8)); + nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); + nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8); + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + + nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); + nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(priv, 
NV03_PFIFO_CACHES, 1); + return 0; +} + +struct nouveau_oclass +nv04_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_fifo_ctor, + .dtor = nv04_fifo_dtor, + .init = nv04_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h new file mode 100644 index 0000000..496a4b4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h @@ -0,0 +1,178 @@ +#ifndef __NV04_FIFO_H__ +#define __NV04_FIFO_H__ + +#include <engine/fifo.h> + +#define NV04_PFIFO_DELAY_0 0x00002040 +#define NV04_PFIFO_DMA_TIMESLICE 0x00002044 +#define NV04_PFIFO_NEXT_CHANNEL 0x00002050 +#define NV03_PFIFO_INTR_0 0x00002100 +#define NV03_PFIFO_INTR_EN_0 0x00002140 +# define NV_PFIFO_INTR_CACHE_ERROR (1<<0) +# define NV_PFIFO_INTR_RUNOUT (1<<4) +# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8) +# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) +# define NV_PFIFO_INTR_DMA_PT (1<<16) +# define NV_PFIFO_INTR_SEMAPHORE (1<<20) +# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) +#define NV03_PFIFO_RAMHT 0x00002210 +#define NV03_PFIFO_RAMFC 0x00002214 +#define NV03_PFIFO_RAMRO 0x00002218 +#define NV40_PFIFO_RAMFC 0x00002220 +#define NV03_PFIFO_CACHES 0x00002500 +#define NV04_PFIFO_MODE 0x00002504 +#define NV04_PFIFO_DMA 0x00002508 +#define NV04_PFIFO_SIZE 0x0000250c +#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) +#define NV50_PFIFO_CTX_TABLE__SIZE 128 +#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) +#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF +#define NV03_PFIFO_CACHE0_PUSH0 0x00003000 +#define NV03_PFIFO_CACHE0_PULL0 0x00003040 +#define NV04_PFIFO_CACHE0_PULL0 0x00003050 +#define NV04_PFIFO_CACHE0_PULL1 0x00003054 +#define NV03_PFIFO_CACHE1_PUSH0 0x00003200 +#define NV03_PFIFO_CACHE1_PUSH1 0x00003204 +#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8) +#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16) +#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f +#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f +#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f +#define NV03_PFIFO_CACHE1_PUT 0x00003210 +#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220 +#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090 +# define 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000 +# define NV_PFIFO_CACHE1_ENDIAN 0x80000000 +# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF +# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000 +#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228 +#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c +#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230 +#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240 +#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244 +#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248 +#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C +#define NV03_PFIFO_CACHE1_PULL0 0x00003240 +#define NV04_PFIFO_CACHE1_PULL0 0x00003250 +# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010 +# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000 +#define NV03_PFIFO_CACHE1_PULL1 0x00003250 +#define NV04_PFIFO_CACHE1_PULL1 0x00003254 +#define NV04_PFIFO_CACHE1_HASH 0x00003258 +#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260 +#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264 +#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268 +#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C +#define NV03_PFIFO_CACHE1_GET 0x00003270 +#define NV04_PFIFO_CACHE1_ENGINE 0x00003280 +#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 +#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 +#define 
NV40_PFIFO_UNK32E4 0x000032E4 +#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) +#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8)) +#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8)) +#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8)) + +struct ramfc_desc { + unsigned bits:6; + unsigned ctxs:5; + unsigned ctxp:8; + unsigned regs:5; + unsigned regp; +}; + +struct nv04_fifo_priv { + struct nouveau_fifo base; + struct ramfc_desc *ramfc_desc; + struct nouveau_ramht *ramht; + struct nouveau_gpuobj *ramro; + struct nouveau_gpuobj *ramfc; +}; + +struct nv04_fifo_base { + struct nouveau_fifo_base base; +}; + +struct nv04_fifo_chan { + struct nouveau_fifo_chan base; + u32 subc[8]; + u32 ramfc; +}; + +int nv04_fifo_object_attach(struct nouveau_object *, + struct nouveau_object *, u32); +void nv04_fifo_object_detach(struct nouveau_object *, int); + +void nv04_fifo_chan_dtor(struct nouveau_object *); +int nv04_fifo_chan_init(struct nouveau_object *); +int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend); + +int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); + +void nv04_fifo_dtor(struct nouveau_object *); +int nv04_fifo_init(struct nouveau_object *); +void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *); +void nv04_fifo_start(struct nouveau_fifo *, unsigned long *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c new file mode 100644 index 0000000..2c927c1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c @@ -0,0 +1,171 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
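[Annotation, not part of the patch] The ramfc_desc table in nv04.h above drives the context-save loop in nv04_fifo_chan_fini(): each entry says how many bits to copy, where they sit in the live PFIFO register (regp/regs) and where they land in the channel's RAMFC entry (ctxp/ctxs). A minimal, self-contained userspace sketch of that pack step follows; the sample values are the nv04 DMA_INSTANCE descriptor (16 bits, RAMFC word 0x08, register 0x322c), everything else is illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct ramfc_desc {
	unsigned bits:6;	/* field width */
	unsigned ctxs:5;	/* bit offset inside the RAMFC word */
	unsigned ctxp:8;	/* byte offset of the RAMFC word */
	unsigned regs:5;	/* bit offset inside the PFIFO register */
	unsigned regp;		/* PFIFO register address */
};

/* mirror of the save step in nv04_fifo_chan_fini(): extract the register
 * field and merge it into the in-memory context word */
static uint32_t
ramfc_save_word(const struct ramfc_desc *c, uint32_t regval, uint32_t ctxword)
{
	uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;	/* register mask */
	uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;	/* context mask  */
	uint32_t rv = (regval & rm) >> c->regs;			/* field value   */

	return (ctxword & ~cm) | (rv << c->ctxs);
}

int main(void)
{
	/* nv04 DMA_INSTANCE: { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE } */
	struct ramfc_desc d = { 16, 0, 0x08, 0, 0x322c };

	/* prints 0xffff5678: low 16 bits replaced by the register field */
	printf("0x%08" PRIx32 "\n", ramfc_save_word(&d, 0x12345678, 0xffffffff));
	return 0;
}

nv04_fifo_chan_fini() applies this to every descriptor for the active channel before clearing the CACHE1 registers.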
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/ramht.h> + +#include <subdev/instmem.h> +#include <subdev/instmem/nv04.h> +#include <subdev/fb.h> + +#include <engine/fifo.h> + +#include "nv04.h" + +static struct ramfc_desc +nv10_ramfc[] = { + { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, + { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, + { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, + { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, + { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, + { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE }, + { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, + { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE }, + { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 }, + {} +}; + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static int +nv10_fifo_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo_chan *chan; + struct nv03_channel_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, + 0x10000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->object_attach = nv04_fifo_object_attach; + nv_parent(chan)->object_detach = nv04_fifo_object_detach; + nv_parent(chan)->context_attach = nv04_fifo_context_attach; + chan->ramfc = chan->base.chid * 32; + + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); + nv_wo32(priv->ramfc, chan->ramfc + 0x14, + NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); + return 0; +} + +static struct nouveau_ofuncs +nv10_fifo_ofuncs = { + .ctor = nv10_fifo_chan_ctor, + .dtor = nv04_fifo_chan_dtor, + .init = nv04_fifo_chan_init, + .fini = nv04_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nv10_fifo_sclass[] = { + { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs }, + {} +}; + +/******************************************************************************* + * FIFO context - basically just the instmem reserved for the channel + ******************************************************************************/ + +static struct nouveau_oclass +nv10_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_fifo_context_ctor, + .dtor = _nouveau_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static int +nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, 
u32 size, + struct nouveau_object **pobject) +{ + struct nv04_instmem_priv *imem = nv04_instmem(parent); + struct nv04_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nouveau_ramht_ref(imem->ramht, &priv->ramht); + nouveau_gpuobj_ref(imem->ramro, &priv->ramro); + nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nv04_fifo_intr; + nv_engine(priv)->cclass = &nv10_fifo_cclass; + nv_engine(priv)->sclass = nv10_fifo_sclass; + priv->base.pause = nv04_fifo_pause; + priv->base.start = nv04_fifo_start; + priv->ramfc_desc = nv10_ramfc; + return 0; +} + +struct nouveau_oclass +nv10_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_fifo_ctor, + .dtor = nv04_fifo_dtor, + .init = nv04_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c new file mode 100644 index 0000000..a9cb51d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c @@ -0,0 +1,208 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
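[Annotation, not part of the patch] Every DMA-channel constructor in this series (nv04, nv10, nv17, nv40) programs the same DMA_FETCH word into RAMFC: 128-byte fetch trigger, 128-byte fetch size, 8 outstanding requests, plus the big-endian bit when the kernel is built big-endian. A small illustrative build of that word from the nv04.h values (userspace sketch; the __BIG_ENDIAN conditional here only mirrors the kernel-side one):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
#define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
#define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8     0x00080000
#define NV_PFIFO_CACHE1_BIG_ENDIAN               0x80000000

int main(void)
{
	uint32_t fetch = NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8;
#ifdef __BIG_ENDIAN
	fetch |= NV_PFIFO_CACHE1_BIG_ENDIAN;
#endif
	/* 0x00086078 on a little-endian build */
	printf("DMA_FETCH = 0x%08" PRIx32 "\n", fetch);
	return 0;
}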
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/ramht.h> + +#include <subdev/instmem.h> +#include <subdev/instmem/nv04.h> +#include <subdev/fb.h> + +#include <engine/fifo.h> + +#include "nv04.h" + +static struct ramfc_desc +nv17_ramfc[] = { + { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, + { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, + { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, + { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, + { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, + { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE }, + { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, + { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE }, + { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 }, + { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE }, + { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP }, + { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT }, + { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE }, + { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE }, + {} +}; + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static int +nv17_fifo_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo_chan *chan; + struct nv03_channel_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, + 0x10000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */ + &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->object_attach = nv04_fifo_object_attach; + nv_parent(chan)->object_detach = nv04_fifo_object_detach; + nv_parent(chan)->context_attach = nv04_fifo_context_attach; + chan->ramfc = chan->base.chid * 64; + + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); + nv_wo32(priv->ramfc, chan->ramfc + 0x14, + NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); + return 0; +} + +static struct nouveau_ofuncs +nv17_fifo_ofuncs = { + .ctor = nv17_fifo_chan_ctor, + .dtor = nv04_fifo_chan_dtor, + .init = nv04_fifo_chan_init, + .fini = nv04_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nv17_fifo_sclass[] = { + { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs }, + {} +}; + +/******************************************************************************* + * FIFO context - basically just the instmem reserved for the channel + ******************************************************************************/ + +static struct nouveau_oclass +nv17_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0x17), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_fifo_context_ctor, + .dtor = _nouveau_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, 
+}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static int +nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_instmem_priv *imem = nv04_instmem(parent); + struct nv04_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nouveau_ramht_ref(imem->ramht, &priv->ramht); + nouveau_gpuobj_ref(imem->ramro, &priv->ramro); + nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nv04_fifo_intr; + nv_engine(priv)->cclass = &nv17_fifo_cclass; + nv_engine(priv)->sclass = nv17_fifo_sclass; + priv->base.pause = nv04_fifo_pause; + priv->base.start = nv04_fifo_start; + priv->ramfc_desc = nv17_ramfc; + return 0; +} + +static int +nv17_fifo_init(struct nouveau_object *object) +{ + struct nv04_fifo_priv *priv = (void *)object; + int ret; + + ret = nouveau_fifo_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff); + nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); + + nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((priv->ramht->bits - 9) << 16) | + (priv->ramht->base.addr >> 8)); + nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); + nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000); + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + + nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); + nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(priv, NV03_PFIFO_CACHES, 1); + return 0; +} + +struct nouveau_oclass +nv17_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0x17), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv17_fifo_ctor, + .dtor = nv04_fifo_dtor, + .init = nv17_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c new file mode 100644 index 0000000..2b1f917 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c @@ -0,0 +1,349 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
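[Annotation, not part of the patch] nv04_fifo_object_attach() earlier in this patch hashes objects into RAMHT with a packed context word: the instance address in 16-byte units in the low bits, an engine selector, the owning channel id and a valid bit. The nv40 variant below shifts the channel id to bit 23 and uses different engine bits. An illustrative sketch of the nv04 packing (enum and helper names are made up for the example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum eng { ENG_SW_OR_DMAOBJ, ENG_GR, ENG_MPEG };

static uint32_t nv04_ramht_context(uint32_t inst_addr, int chid, enum eng e)
{
	uint32_t ctx = inst_addr >> 4;		/* instance, 16-byte units */

	switch (e) {
	case ENG_SW_OR_DMAOBJ: ctx |= 0x00000000; break;
	case ENG_GR:           ctx |= 0x00010000; break;
	case ENG_MPEG:         ctx |= 0x00020000; break;
	}

	ctx |= 0x80000000;			/* valid */
	ctx |= (uint32_t)chid << 24;		/* owning channel */
	return ctx;
}

int main(void)
{
	/* e.g. a GR object at instance 0x1000 on channel 3 -> 0x83010100 */
	printf("0x%08" PRIx32 "\n", nv04_ramht_context(0x1000, 3, ENG_GR));
	return 0;
}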
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/ramht.h> + +#include <subdev/instmem.h> +#include <subdev/instmem/nv04.h> +#include <subdev/fb.h> + +#include <engine/fifo.h> + +#include "nv04.h" + +static struct ramfc_desc +nv40_ramfc[] = { + { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, + { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, + { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, + { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, + { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, + { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE }, + { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, + { 2, 28, 0x18, 28, 0x002058 }, + { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE }, + { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 }, + { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE }, + { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP }, + { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT }, + { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE }, + { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE }, + { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE }, + { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE }, + { 32, 0, 0x40, 0, 0x0032e4 }, + { 32, 0, 0x44, 0, 0x0032e8 }, + { 32, 0, 0x4c, 0, 0x002088 }, + { 32, 0, 0x50, 0, 0x003300 }, + { 32, 0, 0x54, 0, 0x00330c }, + {} +}; + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static int +nv40_fifo_object_attach(struct nouveau_object *parent, + struct nouveau_object *object, u32 handle) +{ + struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo_chan *chan = (void *)parent; + u32 context, chid = chan->base.chid; + int ret; + + if (nv_iclass(object, NV_GPUOBJ_CLASS)) + context = nv_gpuobj(object)->addr >> 4; + else + context = 0x00000004; /* just non-zero */ + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_DMAOBJ: + case NVDEV_ENGINE_SW: + context |= 0x00000000; + break; + case NVDEV_ENGINE_GR: + context |= 0x00100000; + break; + case NVDEV_ENGINE_MPEG: + context |= 0x00200000; + break; + default: + return -EINVAL; + } + + context |= chid << 23; + + mutex_lock(&nv_subdev(priv)->mutex); + ret = nouveau_ramht_insert(priv->ramht, chid, handle, context); + mutex_unlock(&nv_subdev(priv)->mutex); + return ret; +} + +static int +nv40_fifo_context_attach(struct nouveau_object *parent, + struct nouveau_object *engctx) +{ + struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo_chan *chan = (void *)parent; + unsigned long flags; + u32 reg, ctx; + + switch (nv_engidx(engctx->engine)) { + case NVDEV_ENGINE_SW: + return 0; + case NVDEV_ENGINE_GR: + reg = 0x32e0; + ctx = 0x38; + break; + case NVDEV_ENGINE_MPEG: + reg = 0x330c; + ctx = 0x54; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&priv->base.lock, flags); + nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4; + nv_mask(priv, 0x002500, 0x00000001, 0x00000000); + + if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid) + nv_wr32(priv, reg, nv_engctx(engctx)->addr); + nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr); + + nv_mask(priv, 0x002500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&priv->base.lock, flags); + return 0; +} + +static int +nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend, + struct nouveau_object *engctx) +{ + struct nv04_fifo_priv *priv = (void *)parent->engine; + struct 
nv04_fifo_chan *chan = (void *)parent; + unsigned long flags; + u32 reg, ctx; + + switch (nv_engidx(engctx->engine)) { + case NVDEV_ENGINE_SW: + return 0; + case NVDEV_ENGINE_GR: + reg = 0x32e0; + ctx = 0x38; + break; + case NVDEV_ENGINE_MPEG: + reg = 0x330c; + ctx = 0x54; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&priv->base.lock, flags); + nv_mask(priv, 0x002500, 0x00000001, 0x00000000); + + if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid) + nv_wr32(priv, reg, 0x00000000); + nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000); + + nv_mask(priv, 0x002500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&priv->base.lock, flags); + return 0; +} + +static int +nv40_fifo_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo_chan *chan; + struct nv03_channel_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, + 0x1000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_MPEG), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nv40_fifo_context_attach; + nv_parent(chan)->context_detach = nv40_fifo_context_detach; + nv_parent(chan)->object_attach = nv40_fifo_object_attach; + nv_parent(chan)->object_detach = nv04_fifo_object_detach; + chan->ramfc = chan->base.chid * 128; + + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); + nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 | + NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | + NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACHE1_BIG_ENDIAN | +#endif + NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); + nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff); + return 0; +} + +static struct nouveau_ofuncs +nv40_fifo_ofuncs = { + .ctor = nv40_fifo_chan_ctor, + .dtor = nv04_fifo_chan_dtor, + .init = nv04_fifo_chan_init, + .fini = nv04_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nv40_fifo_sclass[] = { + { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs }, + {} +}; + +/******************************************************************************* + * FIFO context - basically just the instmem reserved for the channel + ******************************************************************************/ + +static struct nouveau_oclass +nv40_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_fifo_context_ctor, + .dtor = _nouveau_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static int +nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_instmem_priv *imem = nv04_instmem(parent); + 
struct nv04_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nouveau_ramht_ref(imem->ramht, &priv->ramht); + nouveau_gpuobj_ref(imem->ramro, &priv->ramro); + nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nv04_fifo_intr; + nv_engine(priv)->cclass = &nv40_fifo_cclass; + nv_engine(priv)->sclass = nv40_fifo_sclass; + priv->base.pause = nv04_fifo_pause; + priv->base.start = nv04_fifo_start; + priv->ramfc_desc = nv40_ramfc; + return 0; +} + +static int +nv40_fifo_init(struct nouveau_object *object) +{ + struct nv04_fifo_priv *priv = (void *)object; + struct nouveau_fb *pfb = nouveau_fb(object); + int ret; + + ret = nouveau_fifo_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x002040, 0x000000ff); + nv_wr32(priv, 0x002044, 0x2101ffff); + nv_wr32(priv, 0x002058, 0x00000001); + + nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((priv->ramht->bits - 9) << 16) | + (priv->ramht->base.addr >> 8)); + nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); + + switch (nv_device(priv)->chipset) { + case 0x47: + case 0x49: + case 0x4b: + nv_wr32(priv, 0x002230, 0x00000001); + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x45: + case 0x48: + nv_wr32(priv, 0x002220, 0x00030002); + break; + default: + nv_wr32(priv, 0x002230, 0x00000000); + nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 + + priv->ramfc->addr) >> 16) | + 0x00030000); + break; + } + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + + nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); + nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); + + nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(priv, NV03_PFIFO_CACHES, 1); + return 0; +} + +struct nouveau_oclass +nv40_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_fifo_ctor, + .dtor = nv04_fifo_dtor, + .init = nv40_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c new file mode 100644 index 0000000..e9b8217 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -0,0 +1,515 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
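[Annotation, not part of the patch] For indirect (IB-mode) channels, nv50_fifo_chan_ctor_ind() below stores the userspace pushbuffer ring in RAMFC words 0x50/0x54: the low 32 bits of the offset, then the high offset bits together with log2(number of 8-byte IB entries) starting at bit 16. A self-contained sketch of that encoding; log2i() here is only a stand-in for the driver's core/math.h helper (ceiling log2):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static unsigned log2i(uint64_t v)	/* stand-in for the driver's log2i() */
{
	unsigned r = 0;
	while ((1ULL << r) < v)
		r++;
	return r;
}

static void nv50_ib_words(uint64_t ioffset, uint64_t ilength_bytes,
			  uint32_t *w50, uint32_t *w54)
{
	uint64_t ilength = log2i(ilength_bytes / 8);	/* 8-byte IB entries */

	*w50 = (uint32_t)ioffset;			 /* lower_32_bits(ioffset) */
	*w54 = (uint32_t)(ioffset >> 32) | (uint32_t)(ilength << 16);
}

int main(void)
{
	uint32_t w50, w54;

	/* e.g. a 64KiB ring (8192 entries -> log2 = 13) at offset 0x120000000 */
	nv50_ib_words(0x120000000ULL, 64 * 1024, &w50, &w54);
	printf("0x50=0x%08" PRIx32 " 0x54=0x%08" PRIx32 "\n", w50, w54);
	return 0;
}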
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/engctx.h> +#include <core/ramht.h> +#include <core/class.h> +#include <core/math.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> + +#include <engine/dmaobj.h> +#include <engine/fifo.h> + +#include "nv50.h" + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static void +nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv) +{ + struct nouveau_bar *bar = nouveau_bar(priv); + struct nouveau_gpuobj *cur; + int i, p; + + cur = priv->playlist[priv->cur_playlist]; + priv->cur_playlist = !priv->cur_playlist; + + for (i = priv->base.min, p = 0; i < priv->base.max; i++) { + if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000) + nv_wo32(cur, p++ * 4, i); + } + + bar->flush(bar); + + nv_wr32(priv, 0x0032f4, cur->addr >> 12); + nv_wr32(priv, 0x0032ec, p); + nv_wr32(priv, 0x002500, 0x00000101); +} + +void +nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) +{ + mutex_lock(&nv_subdev(priv)->mutex); + nv50_fifo_playlist_update_locked(priv); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +nv50_fifo_context_attach(struct nouveau_object *parent, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_base *base = (void *)parent->parent; + struct nouveau_gpuobj *ectx = (void *)object; + u64 limit = ectx->addr + ectx->size - 1; + u64 start = ectx->addr; + u32 addr; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : addr = 0x0000; break; + case NVDEV_ENGINE_MPEG : addr = 0x0060; break; + default: + return -EINVAL; + } + + nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; + nv_wo32(base->eng, addr + 0x00, 0x00190000); + nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit)); + nv_wo32(base->eng, addr + 0x08, lower_32_bits(start)); + nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 | + upper_32_bits(start)); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 0x14, 0x00000000); + bar->flush(bar); + return 0; +} + +static int +nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_priv *priv = (void *)parent->engine; + struct nv50_fifo_base *base = (void *)parent->parent; + struct nv50_fifo_chan *chan = (void *)parent; + u32 addr, me; + int ret = 0; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : addr = 0x0000; break; + case NVDEV_ENGINE_MPEG : addr = 0x0060; break; + default: + return -EINVAL; + } + + /* HW bug workaround: + * + * PFIFO will hang forever if the connected engines don't report + * that they've processed the context switch request. + * + * In order for the kickoff to work, we need to ensure all the + * connected engines are in a state where they can answer. + * + * Newer chipsets don't seem to suffer from this issue, and well, + * there's also a "ignore these engines" bitmask reg we can use + * if we hit the issue there.. + */ + me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001); + + /* do the kickoff... 
*/ + nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); + if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) { + nv_error(priv, "channel %d [%s] unload timeout\n", + chan->base.chid, nouveau_client_name(chan)); + if (suspend) + ret = -EBUSY; + } + nv_wr32(priv, 0x00b860, me); + + if (ret == 0) { + nv_wo32(base->eng, addr + 0x00, 0x00000000); + nv_wo32(base->eng, addr + 0x04, 0x00000000); + nv_wo32(base->eng, addr + 0x08, 0x00000000); + nv_wo32(base->eng, addr + 0x0c, 0x00000000); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 0x14, 0x00000000); + bar->flush(bar); + } + + return ret; +} + +static int +nv50_fifo_object_attach(struct nouveau_object *parent, + struct nouveau_object *object, u32 handle) +{ + struct nv50_fifo_chan *chan = (void *)parent; + u32 context; + + if (nv_iclass(object, NV_GPUOBJ_CLASS)) + context = nv_gpuobj(object)->node->offset >> 4; + else + context = 0x00000004; /* just non-zero */ + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_DMAOBJ: + case NVDEV_ENGINE_SW : context |= 0x00000000; break; + case NVDEV_ENGINE_GR : context |= 0x00100000; break; + case NVDEV_ENGINE_MPEG : context |= 0x00200000; break; + default: + return -EINVAL; + } + + return nouveau_ramht_insert(chan->ramht, 0, handle, context); +} + +void +nv50_fifo_object_detach(struct nouveau_object *parent, int cookie) +{ + struct nv50_fifo_chan *chan = (void *)parent; + nouveau_ramht_remove(chan->ramht, cookie); +} + +static int +nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_base *base = (void *)parent; + struct nv50_fifo_chan *chan; + struct nv03_channel_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, + 0x2000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_MPEG), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nv50_fifo_context_attach; + nv_parent(chan)->context_detach = nv50_fifo_context_detach; + nv_parent(chan)->object_attach = nv50_fifo_object_attach; + nv_parent(chan)->object_detach = nv50_fifo_object_detach; + + ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, + &chan->ramht); + if (ret) + return ret; + + nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x3c, 0x003f6078); + nv_wo32(base->ramfc, 0x44, 0x01003fff); + nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); + nv_wo32(base->ramfc, 0x4c, 0xffffffff); + nv_wo32(base->ramfc, 0x60, 0x7fffffff); + nv_wo32(base->ramfc, 0x78, 0x00000000); + nv_wo32(base->ramfc, 0x7c, 0x30000001); + nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->base.node->offset >> 4)); + bar->flush(bar); + return 0; +} + +static int +nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_channel_ind_class *args = data; + struct nouveau_bar *bar = 
nouveau_bar(parent); + struct nv50_fifo_base *base = (void *)parent; + struct nv50_fifo_chan *chan; + u64 ioffset, ilength; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, + 0x2000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_MPEG), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nv50_fifo_context_attach; + nv_parent(chan)->context_detach = nv50_fifo_context_detach; + nv_parent(chan)->object_attach = nv50_fifo_object_attach; + nv_parent(chan)->object_detach = nv50_fifo_object_detach; + + ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, + &chan->ramht); + if (ret) + return ret; + + ioffset = args->ioffset; + ilength = log2i(args->ilength / 8); + + nv_wo32(base->ramfc, 0x3c, 0x403f6078); + nv_wo32(base->ramfc, 0x44, 0x01003fff); + nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); + nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset)); + nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); + nv_wo32(base->ramfc, 0x60, 0x7fffffff); + nv_wo32(base->ramfc, 0x78, 0x00000000); + nv_wo32(base->ramfc, 0x7c, 0x30000001); + nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->base.node->offset >> 4)); + bar->flush(bar); + return 0; +} + +void +nv50_fifo_chan_dtor(struct nouveau_object *object) +{ + struct nv50_fifo_chan *chan = (void *)object; + nouveau_ramht_ref(NULL, &chan->ramht); + nouveau_fifo_channel_destroy(&chan->base); +} + +static int +nv50_fifo_chan_init(struct nouveau_object *object) +{ + struct nv50_fifo_priv *priv = (void *)object->engine; + struct nv50_fifo_base *base = (void *)object->parent; + struct nv50_fifo_chan *chan = (void *)object; + struct nouveau_gpuobj *ramfc = base->ramfc; + u32 chid = chan->base.chid; + int ret; + + ret = nouveau_fifo_channel_init(&chan->base); + if (ret) + return ret; + + nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12); + nv50_fifo_playlist_update(priv); + return 0; +} + +int +nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_fifo_priv *priv = (void *)object->engine; + struct nv50_fifo_chan *chan = (void *)object; + u32 chid = chan->base.chid; + + /* remove channel from playlist, fifo will unload context */ + nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000); + nv50_fifo_playlist_update(priv); + nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000); + + return nouveau_fifo_channel_fini(&chan->base, suspend); +} + +static struct nouveau_ofuncs +nv50_fifo_ofuncs_dma = { + .ctor = nv50_fifo_chan_ctor_dma, + .dtor = nv50_fifo_chan_dtor, + .init = nv50_fifo_chan_init, + .fini = nv50_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_ofuncs +nv50_fifo_ofuncs_ind = { + .ctor = nv50_fifo_chan_ctor_ind, + .dtor = nv50_fifo_chan_dtor, + .init = nv50_fifo_chan_init, + .fini = nv50_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nv50_fifo_sclass[] = { + { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma }, + { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind }, + {} +}; + +/******************************************************************************* + * FIFO context - basically just the instmem reserved for the channel + 
******************************************************************************/ + +static int +nv50_fifo_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_fifo_base *base; + int ret; + + ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000, + 0x1000, NVOBJ_FLAG_HEAP, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0, + NVOBJ_FLAG_ZERO_ALLOC, &base->eng); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0, + &base->pgd); + if (ret) + return ret; + + ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd); + if (ret) + return ret; + + return 0; +} + +void +nv50_fifo_context_dtor(struct nouveau_object *object) +{ + struct nv50_fifo_base *base = (void *)object; + nouveau_vm_ref(NULL, &base->vm, base->pgd); + nouveau_gpuobj_ref(NULL, &base->pgd); + nouveau_gpuobj_ref(NULL, &base->eng); + nouveau_gpuobj_ref(NULL, &base->ramfc); + nouveau_gpuobj_ref(NULL, &base->cache); + nouveau_fifo_context_destroy(&base->base); +} + +static struct nouveau_oclass +nv50_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_fifo_context_ctor, + .dtor = nv50_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static int +nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, + &priv->playlist[0]); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, + &priv->playlist[1]); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nv04_fifo_intr; + nv_engine(priv)->cclass = &nv50_fifo_cclass; + nv_engine(priv)->sclass = nv50_fifo_sclass; + return 0; +} + +void +nv50_fifo_dtor(struct nouveau_object *object) +{ + struct nv50_fifo_priv *priv = (void *)object; + + nouveau_gpuobj_ref(NULL, &priv->playlist[1]); + nouveau_gpuobj_ref(NULL, &priv->playlist[0]); + + nouveau_fifo_destroy(&priv->base); +} + +int +nv50_fifo_init(struct nouveau_object *object) +{ + struct nv50_fifo_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_fifo_init(&priv->base); + if (ret) + return ret; + + nv_mask(priv, 0x000200, 0x00000100, 0x00000000); + nv_mask(priv, 0x000200, 0x00000100, 0x00000100); + nv_wr32(priv, 0x00250c, 0x6f3cfc34); + nv_wr32(priv, 0x002044, 0x01003fff); + + nv_wr32(priv, 0x002100, 0xffffffff); + nv_wr32(priv, 0x002140, 0xbfffffff); + + for (i = 0; i < 128; i++) + nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); + nv50_fifo_playlist_update_locked(priv); + + nv_wr32(priv, 0x003200, 0x00000001); + 
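+ /* Note: by offset, these three writes (0x003200 above, 0x003250 and
+  * 0x002500 below) appear to match the NV03_PFIFO_CACHE1_PUSH0,
+  * NV04_PFIFO_CACHE1_PULL0 and NV03_PFIFO_CACHES enables used by the
+  * nv04/nv40 code earlier in this patch, re-enabling the pusher, puller
+  * and caches once the channel table and playlist have been reset. */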
nv_wr32(priv, 0x003250, 0x00000001); + nv_wr32(priv, 0x002500, 0x00000001); + return 0; +} + +struct nouveau_oclass +nv50_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_fifo_ctor, + .dtor = nv50_fifo_dtor, + .init = nv50_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h new file mode 100644 index 0000000..3a9ceb3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h @@ -0,0 +1,36 @@ +#ifndef __NV50_FIFO_H__ +#define __NV50_FIFO_H__ + +struct nv50_fifo_priv { + struct nouveau_fifo base; + struct nouveau_gpuobj *playlist[2]; + int cur_playlist; +}; + +struct nv50_fifo_base { + struct nouveau_fifo_base base; + struct nouveau_gpuobj *ramfc; + struct nouveau_gpuobj *cache; + struct nouveau_gpuobj *eng; + struct nouveau_gpuobj *pgd; + struct nouveau_vm *vm; +}; + +struct nv50_fifo_chan { + struct nouveau_fifo_chan base; + u32 subc[8]; + struct nouveau_ramht *ramht; +}; + +void nv50_fifo_playlist_update(struct nv50_fifo_priv *); + +void nv50_fifo_object_detach(struct nouveau_object *, int); +void nv50_fifo_chan_dtor(struct nouveau_object *); +int nv50_fifo_chan_fini(struct nouveau_object *, bool); + +void nv50_fifo_context_dtor(struct nouveau_object *); + +void nv50_fifo_dtor(struct nouveau_object *); +int nv50_fifo_init(struct nouveau_object *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c new file mode 100644 index 0000000..35b94bd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -0,0 +1,442 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/client.h> +#include <core/engctx.h> +#include <core/ramht.h> +#include <core/event.h> +#include <core/class.h> +#include <core/math.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> + +#include <engine/dmaobj.h> +#include <engine/fifo.h> + +#include "nv50.h" + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static int +nv84_fifo_context_attach(struct nouveau_object *parent, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_base *base = (void *)parent->parent; + struct nouveau_gpuobj *ectx = (void *)object; + u64 limit = ectx->addr + ectx->size - 1; + u64 start = ectx->addr; + u32 addr; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : addr = 0x0020; break; + case NVDEV_ENGINE_MPEG : addr = 0x0060; break; + case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break; + case NVDEV_ENGINE_COPY0: addr = 0x00c0; break; + default: + return -EINVAL; + } + + nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; + nv_wo32(base->eng, addr + 0x00, 0x00190000); + nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit)); + nv_wo32(base->eng, addr + 0x08, lower_32_bits(start)); + nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 | + upper_32_bits(start)); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 0x14, 0x00000000); + bar->flush(bar); + return 0; +} + +static int +nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_priv *priv = (void *)parent->engine; + struct nv50_fifo_base *base = (void *)parent->parent; + struct nv50_fifo_chan *chan = (void *)parent; + u32 addr, save, engn; + bool done; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break; + case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break; + case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break; + case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break; + default: + return -EINVAL; + } + + save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn); + nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); + done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); + nv_wr32(priv, 0x002520, save); + if (!done) { + nv_error(priv, "channel %d [%s] unload timeout\n", + chan->base.chid, nouveau_client_name(chan)); + if (suspend) + return -EBUSY; + } + + nv_wo32(base->eng, addr + 0x00, 0x00000000); + nv_wo32(base->eng, addr + 0x04, 0x00000000); + nv_wo32(base->eng, addr + 0x08, 0x00000000); + nv_wo32(base->eng, addr + 0x0c, 0x00000000); + nv_wo32(base->eng, addr + 0x10, 0x00000000); + nv_wo32(base->eng, addr + 0x14, 0x00000000); + bar->flush(bar); + return 0; +} + +static int +nv84_fifo_object_attach(struct nouveau_object *parent, + struct nouveau_object *object, u32 handle) +{ + struct nv50_fifo_chan *chan = (void *)parent; + u32 context; + + if (nv_iclass(object, NV_GPUOBJ_CLASS)) + context = nv_gpuobj(object)->node->offset >> 4; + else + context = 0x00000004; /* just non-zero */ + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_DMAOBJ: + case NVDEV_ENGINE_SW : context |= 0x00000000; break; + case NVDEV_ENGINE_GR : context |= 0x00100000; break; + case NVDEV_ENGINE_MPEG : + case NVDEV_ENGINE_PPP : 
context |= 0x00200000; break; + case NVDEV_ENGINE_ME : + case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break; + case NVDEV_ENGINE_VP : context |= 0x00400000; break; + case NVDEV_ENGINE_CRYPT : + case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break; + case NVDEV_ENGINE_BSP : context |= 0x00600000; break; + default: + return -EINVAL; + } + + return nouveau_ramht_insert(chan->ramht, 0, handle, context); +} + +static int +nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_base *base = (void *)parent; + struct nv50_fifo_chan *chan; + struct nv03_channel_dma_class *args = data; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, + 0x2000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_MPEG) | + (1ULL << NVDEV_ENGINE_ME) | + (1ULL << NVDEV_ENGINE_VP) | + (1ULL << NVDEV_ENGINE_CRYPT) | + (1ULL << NVDEV_ENGINE_BSP) | + (1ULL << NVDEV_ENGINE_PPP) | + (1ULL << NVDEV_ENGINE_COPY0) | + (1ULL << NVDEV_ENGINE_UNK1C1), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, + &chan->ramht); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nv84_fifo_context_attach; + nv_parent(chan)->context_detach = nv84_fifo_context_detach; + nv_parent(chan)->object_attach = nv84_fifo_object_attach; + nv_parent(chan)->object_detach = nv50_fifo_object_detach; + + nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x3c, 0x003f6078); + nv_wo32(base->ramfc, 0x44, 0x01003fff); + nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); + nv_wo32(base->ramfc, 0x4c, 0xffffffff); + nv_wo32(base->ramfc, 0x60, 0x7fffffff); + nv_wo32(base->ramfc, 0x78, 0x00000000); + nv_wo32(base->ramfc, 0x7c, 0x30000001); + nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->base.node->offset >> 4)); + nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10); + nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12); + bar->flush(bar); + return 0; +} + +static int +nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_fifo_base *base = (void *)parent; + struct nv50_fifo_chan *chan; + struct nv50_channel_ind_class *args = data; + u64 ioffset, ilength; + int ret; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, + 0x2000, args->pushbuf, + (1ULL << NVDEV_ENGINE_DMAOBJ) | + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_MPEG) | + (1ULL << NVDEV_ENGINE_ME) | + (1ULL << NVDEV_ENGINE_VP) | + (1ULL << NVDEV_ENGINE_CRYPT) | + (1ULL << NVDEV_ENGINE_BSP) | + (1ULL << NVDEV_ENGINE_PPP) | + (1ULL << NVDEV_ENGINE_COPY0) | + (1ULL << NVDEV_ENGINE_UNK1C1), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + ret = 
nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, + &chan->ramht); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nv84_fifo_context_attach; + nv_parent(chan)->context_detach = nv84_fifo_context_detach; + nv_parent(chan)->object_attach = nv84_fifo_object_attach; + nv_parent(chan)->object_detach = nv50_fifo_object_detach; + + ioffset = args->ioffset; + ilength = log2i(args->ilength / 8); + + nv_wo32(base->ramfc, 0x3c, 0x403f6078); + nv_wo32(base->ramfc, 0x44, 0x01003fff); + nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); + nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset)); + nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16)); + nv_wo32(base->ramfc, 0x60, 0x7fffffff); + nv_wo32(base->ramfc, 0x78, 0x00000000); + nv_wo32(base->ramfc, 0x7c, 0x30000001); + nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->base.node->offset >> 4)); + nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10); + nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12); + bar->flush(bar); + return 0; +} + +static int +nv84_fifo_chan_init(struct nouveau_object *object) +{ + struct nv50_fifo_priv *priv = (void *)object->engine; + struct nv50_fifo_base *base = (void *)object->parent; + struct nv50_fifo_chan *chan = (void *)object; + struct nouveau_gpuobj *ramfc = base->ramfc; + u32 chid = chan->base.chid; + int ret; + + ret = nouveau_fifo_channel_init(&chan->base); + if (ret) + return ret; + + nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8); + nv50_fifo_playlist_update(priv); + return 0; +} + +static struct nouveau_ofuncs +nv84_fifo_ofuncs_dma = { + .ctor = nv84_fifo_chan_ctor_dma, + .dtor = nv50_fifo_chan_dtor, + .init = nv84_fifo_chan_init, + .fini = nv50_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_ofuncs +nv84_fifo_ofuncs_ind = { + .ctor = nv84_fifo_chan_ctor_ind, + .dtor = nv50_fifo_chan_dtor, + .init = nv84_fifo_chan_init, + .fini = nv50_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nv84_fifo_sclass[] = { + { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma }, + { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind }, + {} +}; + +/******************************************************************************* + * FIFO context - basically just the instmem reserved for the channel + ******************************************************************************/ + +static int +nv84_fifo_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_fifo_base *base; + int ret; + + ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000, + 0x1000, NVOBJ_FLAG_HEAP, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0, + NVOBJ_FLAG_ZERO_ALLOC, &base->eng); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, + 0, &base->pgd); + if (ret) + return ret; + + ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000, + 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100, + 0x100, 
NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc); + if (ret) + return ret; + + return 0; +} + +static struct nouveau_oclass +nv84_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_fifo_context_ctor, + .dtor = nv50_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static void +nv84_fifo_uevent_enable(struct nouveau_event *event, int index) +{ + struct nv84_fifo_priv *priv = event->priv; + nv_mask(priv, 0x002140, 0x40000000, 0x40000000); +} + +static void +nv84_fifo_uevent_disable(struct nouveau_event *event, int index) +{ + struct nv84_fifo_priv *priv = event->priv; + nv_mask(priv, 0x002140, 0x40000000, 0x00000000); +} + +static int +nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, + &priv->playlist[0]); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, + &priv->playlist[1]); + if (ret) + return ret; + + priv->base.uevent->enable = nv84_fifo_uevent_enable; + priv->base.uevent->disable = nv84_fifo_uevent_disable; + priv->base.uevent->priv = priv; + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nv04_fifo_intr; + nv_engine(priv)->cclass = &nv84_fifo_cclass; + nv_engine(priv)->sclass = nv84_fifo_sclass; + return 0; +} + +struct nouveau_oclass +nv84_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_fifo_ctor, + .dtor = nv50_fifo_dtor, + .init = nv50_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c new file mode 100644 index 0000000..46dfa68 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -0,0 +1,733 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/handle.h> +#include <core/namedb.h> +#include <core/gpuobj.h> +#include <core/engctx.h> +#include <core/event.h> +#include <core/class.h> +#include <core/math.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> +#include <subdev/vm.h> + +#include <engine/dmaobj.h> +#include <engine/fifo.h> + +struct nvc0_fifo_priv { + struct nouveau_fifo base; + struct nouveau_gpuobj *playlist[2]; + int cur_playlist; + struct { + struct nouveau_gpuobj *mem; + struct nouveau_vma bar; + } user; + int spoon_nr; +}; + +struct nvc0_fifo_base { + struct nouveau_fifo_base base; + struct nouveau_gpuobj *pgd; + struct nouveau_vm *vm; +}; + +struct nvc0_fifo_chan { + struct nouveau_fifo_chan base; +}; + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static void +nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) +{ + struct nouveau_bar *bar = nouveau_bar(priv); + struct nouveau_gpuobj *cur; + int i, p; + + mutex_lock(&nv_subdev(priv)->mutex); + cur = priv->playlist[priv->cur_playlist]; + priv->cur_playlist = !priv->cur_playlist; + + for (i = 0, p = 0; i < 128; i++) { + if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1)) + continue; + nv_wo32(cur, p + 0, i); + nv_wo32(cur, p + 4, 0x00000004); + p += 8; + } + bar->flush(bar); + + nv_wr32(priv, 0x002270, cur->addr >> 12); + nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); + if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000)) + nv_error(priv, "playlist update failed\n"); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +nvc0_fifo_context_attach(struct nouveau_object *parent, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nvc0_fifo_base *base = (void *)parent->parent; + struct nouveau_engctx *ectx = (void *)object; + u32 addr; + int ret; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : addr = 0x0210; break; + case NVDEV_ENGINE_COPY0: addr = 0x0230; break; + case NVDEV_ENGINE_COPY1: addr = 0x0240; break; + case NVDEV_ENGINE_BSP : addr = 0x0270; break; + case NVDEV_ENGINE_VP : addr = 0x0250; break; + case NVDEV_ENGINE_PPP : addr = 0x0260; break; + default: + return -EINVAL; + } + + if (!ectx->vma.node) { + ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, + NV_MEM_ACCESS_RW, &ectx->vma); + if (ret) + return ret; + + nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; + } + + nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4); + nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset)); + bar->flush(bar); + return 0; +} + +static int +nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nvc0_fifo_priv *priv = (void *)parent->engine; + struct nvc0_fifo_base *base = (void *)parent->parent; + struct nvc0_fifo_chan *chan = (void *)parent; + u32 addr; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : addr = 0x0210; break; + case NVDEV_ENGINE_COPY0: addr = 0x0230; break; + case NVDEV_ENGINE_COPY1: addr = 0x0240; break; + case NVDEV_ENGINE_BSP : addr = 0x0270; break; + case NVDEV_ENGINE_VP : addr = 0x0250; break; + case NVDEV_ENGINE_PPP : addr = 0x0260; break; + default: + return -EINVAL; + } + + nv_wr32(priv, 0x002634, chan->base.chid); + if 
(!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { + nv_error(priv, "channel %d [%s] kick timeout\n", + chan->base.chid, nouveau_client_name(chan)); + if (suspend) + return -EBUSY; + } + + nv_wo32(base, addr + 0x00, 0x00000000); + nv_wo32(base, addr + 0x04, 0x00000000); + bar->flush(bar); + return 0; +} + +static int +nvc0_fifo_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nvc0_fifo_priv *priv = (void *)engine; + struct nvc0_fifo_base *base = (void *)parent; + struct nvc0_fifo_chan *chan; + struct nv50_channel_ind_class *args = data; + u64 usermem, ioffset, ilength; + int ret, i; + + if (size < sizeof(*args)) + return -EINVAL; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, + priv->user.bar.offset, 0x1000, + args->pushbuf, + (1ULL << NVDEV_ENGINE_SW) | + (1ULL << NVDEV_ENGINE_GR) | + (1ULL << NVDEV_ENGINE_COPY0) | + (1ULL << NVDEV_ENGINE_COPY1) | + (1ULL << NVDEV_ENGINE_BSP) | + (1ULL << NVDEV_ENGINE_VP) | + (1ULL << NVDEV_ENGINE_PPP), &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nvc0_fifo_context_attach; + nv_parent(chan)->context_detach = nvc0_fifo_context_detach; + + usermem = chan->base.chid * 0x1000; + ioffset = args->ioffset; + ilength = log2i(args->ilength / 8); + + for (i = 0; i < 0x1000; i += 4) + nv_wo32(priv->user.mem, usermem + i, 0x00000000); + + nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem)); + nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem)); + nv_wo32(base, 0x10, 0x0000face); + nv_wo32(base, 0x30, 0xfffff902); + nv_wo32(base, 0x48, lower_32_bits(ioffset)); + nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); + nv_wo32(base, 0x54, 0x00000002); + nv_wo32(base, 0x84, 0x20400000); + nv_wo32(base, 0x94, 0x30000001); + nv_wo32(base, 0x9c, 0x00000100); + nv_wo32(base, 0xa4, 0x1f1f1f1f); + nv_wo32(base, 0xa8, 0x1f1f1f1f); + nv_wo32(base, 0xac, 0x0000001f); + nv_wo32(base, 0xb8, 0xf8000000); + nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */ + nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */ + bar->flush(bar); + return 0; +} + +static int +nvc0_fifo_chan_init(struct nouveau_object *object) +{ + struct nouveau_gpuobj *base = nv_gpuobj(object->parent); + struct nvc0_fifo_priv *priv = (void *)object->engine; + struct nvc0_fifo_chan *chan = (void *)object; + u32 chid = chan->base.chid; + int ret; + + ret = nouveau_fifo_channel_init(&chan->base); + if (ret) + return ret; + + nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12); + nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001); + nvc0_fifo_playlist_update(priv); + return 0; +} + +static int +nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend) +{ + struct nvc0_fifo_priv *priv = (void *)object->engine; + struct nvc0_fifo_chan *chan = (void *)object; + u32 chid = chan->base.chid; + u32 mask, engine; + + nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); + nvc0_fifo_playlist_update(priv); + mask = nv_rd32(priv, 0x0025a4); + for (engine = 0; mask && engine < 16; engine++) { + if (!(mask & (1 << engine))) + continue; + nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000); + mask &= ~(1 << engine); + } + nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000); + + return nouveau_fifo_channel_fini(&chan->base, suspend); +} + +static struct nouveau_ofuncs +nvc0_fifo_ofuncs = { + .ctor = 
nvc0_fifo_chan_ctor, + .dtor = _nouveau_fifo_channel_dtor, + .init = nvc0_fifo_chan_init, + .fini = nvc0_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nvc0_fifo_sclass[] = { + { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs }, + {} +}; + +/******************************************************************************* + * FIFO context - instmem heap and vm setup + ******************************************************************************/ + +static int +nvc0_fifo_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_fifo_base *base; + int ret; + + ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_HEAP, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0, + &base->pgd); + if (ret) + return ret; + + nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr)); + nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr)); + nv_wo32(base, 0x0208, 0xffffffff); + nv_wo32(base, 0x020c, 0x000000ff); + + ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd); + if (ret) + return ret; + + return 0; +} + +static void +nvc0_fifo_context_dtor(struct nouveau_object *object) +{ + struct nvc0_fifo_base *base = (void *)object; + nouveau_vm_ref(NULL, &base->vm, base->pgd); + nouveau_gpuobj_ref(NULL, &base->pgd); + nouveau_fifo_context_destroy(&base->base); +} + +static struct nouveau_oclass +nvc0_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_fifo_context_ctor, + .dtor = nvc0_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static const struct nouveau_enum nvc0_fifo_fault_unit[] = { + { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR }, + { 0x03, "PEEPHOLE" }, + { 0x04, "BAR1" }, + { 0x05, "BAR3" }, + { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO }, + { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP }, + { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP }, + { 0x13, "PCOUNTER" }, + { 0x14, "PVP", NULL, NVDEV_ENGINE_VP }, + { 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 }, + { 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 }, + { 0x17, "PDAEMON" }, + {} +}; + +static const struct nouveau_enum nvc0_fifo_fault_reason[] = { + { 0x00, "PT_NOT_PRESENT" }, + { 0x01, "PT_TOO_SHORT" }, + { 0x02, "PAGE_NOT_PRESENT" }, + { 0x03, "VM_LIMIT_EXCEEDED" }, + { 0x04, "NO_CHANNEL" }, + { 0x05, "PAGE_SYSTEM_ONLY" }, + { 0x06, "PAGE_READ_ONLY" }, + { 0x0a, "COMPRESSED_SYSRAM" }, + { 0x0c, "INVALID_STORAGE_TYPE" }, + {} +}; + +static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = { + { 0x01, "PCOPY0" }, + { 0x02, "PCOPY1" }, + { 0x04, "DISPATCH" }, + { 0x05, "CTXCTL" }, + { 0x06, "PFIFO" }, + { 0x07, "BAR_READ" }, + { 0x08, "BAR_WRITE" }, + { 0x0b, "PVP" }, + { 0x0c, "PPPP" }, + { 0x0d, "PBSP" }, + { 0x11, "PCOUNTER" }, + { 0x12, "PDAEMON" }, + { 0x14, "CCACHE" }, + { 0x15, "CCACHE_POST" }, + {} +}; + +static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = { + { 0x01, "TEX" }, + { 0x0c, "ESETUP" }, + { 0x0e, "CTXCTL" }, + { 
0x0f, "PROP" }, + {} +}; + +static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = { +/* { 0x00008000, "" } seen with null ib push */ + { 0x00200000, "ILLEGAL_MTHD" }, + { 0x00800000, "EMPTY_SUBC" }, + {} +}; + +static void +nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit) +{ + u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); + u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); + u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); + u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); + u32 client = (stat & 0x00001f00) >> 8; + const struct nouveau_enum *en; + struct nouveau_engine *engine; + struct nouveau_object *engctx = NULL; + + switch (unit) { + case 3: /* PEEPHOLE */ + nv_mask(priv, 0x001718, 0x00000000, 0x00000000); + break; + case 4: /* BAR1 */ + nv_mask(priv, 0x001704, 0x00000000, 0x00000000); + break; + case 5: /* BAR3 */ + nv_mask(priv, 0x001714, 0x00000000, 0x00000000); + break; + default: + break; + } + + nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ? + "write" : "read", (u64)vahi << 32 | valo); + nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f); + pr_cont("] from "); + en = nouveau_enum_print(nvc0_fifo_fault_unit, unit); + if (stat & 0x00000040) { + pr_cont("/"); + nouveau_enum_print(nvc0_fifo_fault_hubclient, client); + } else { + pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24); + nouveau_enum_print(nvc0_fifo_fault_gpcclient, client); + } + + if (en && en->data2) { + engine = nouveau_engine(priv, en->data2); + if (engine) + engctx = nouveau_engctx_get(engine, inst); + + } + pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, + nouveau_client_name(engctx)); + + nouveau_engctx_put(engctx); +} + +static int +nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data) +{ + struct nvc0_fifo_chan *chan = NULL; + struct nouveau_handle *bind; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&priv->base.lock, flags); + if (likely(chid >= priv->base.min && chid <= priv->base.max)) + chan = (void *)priv->base.channel[chid]; + if (unlikely(!chan)) + goto out; + + bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e); + if (likely(bind)) { + if (!mthd || !nv_call(bind->object, mthd, data)) + ret = 0; + nouveau_namedb_put(bind); + } + +out: + spin_unlock_irqrestore(&priv->base.lock, flags); + return ret; +} + +static void +nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit) +{ + u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); + u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); + u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); + u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00003ffc); + u32 show = stat; + + if (stat & 0x00200000) { + if (mthd == 0x0054) { + if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000)) + show &= ~0x00200000; + } + } + + if (stat & 0x00800000) { + if (!nvc0_fifo_swmthd(priv, chid, mthd, data)) + show &= ~0x00800000; + } + + if (show) { + nv_error(priv, "SUBFIFO%d:", unit); + nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show); + pr_cont("\n"); + nv_error(priv, + "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, + nouveau_client_name_for_fifo_chid(&priv->base, chid), + subc, mthd, data); + } + + nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); + nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); +} + +static void +nvc0_fifo_intr(struct nouveau_subdev *subdev) +{ + struct nvc0_fifo_priv *priv = (void *)subdev; + u32 mask = nv_rd32(priv, 
0x002140); + u32 stat = nv_rd32(priv, 0x002100) & mask; + + if (stat & 0x00000001) { + u32 intr = nv_rd32(priv, 0x00252c); + nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr); + nv_wr32(priv, 0x002100, 0x00000001); + stat &= ~0x00000001; + } + + if (stat & 0x00000100) { + u32 intr = nv_rd32(priv, 0x00254c); + nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr); + nv_wr32(priv, 0x002100, 0x00000100); + stat &= ~0x00000100; + } + + if (stat & 0x00010000) { + u32 intr = nv_rd32(priv, 0x00256c); + nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr); + nv_wr32(priv, 0x002100, 0x00010000); + stat &= ~0x00010000; + } + + if (stat & 0x01000000) { + u32 intr = nv_rd32(priv, 0x00258c); + nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr); + nv_wr32(priv, 0x002100, 0x01000000); + stat &= ~0x01000000; + } + + if (stat & 0x10000000) { + u32 units = nv_rd32(priv, 0x00259c); + u32 u = units; + + while (u) { + int i = ffs(u) - 1; + nvc0_fifo_isr_vm_fault(priv, i); + u &= ~(1 << i); + } + + nv_wr32(priv, 0x00259c, units); + stat &= ~0x10000000; + } + + if (stat & 0x20000000) { + u32 units = nv_rd32(priv, 0x0025a0); + u32 u = units; + + while (u) { + int i = ffs(u) - 1; + nvc0_fifo_isr_subfifo_intr(priv, i); + u &= ~(1 << i); + } + + nv_wr32(priv, 0x0025a0, units); + stat &= ~0x20000000; + } + + if (stat & 0x40000000) { + u32 intr0 = nv_rd32(priv, 0x0025a4); + u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000); + nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n", + intr0, intr1); + stat &= ~0x40000000; + } + + if (stat & 0x80000000) { + u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000); + nouveau_event_trigger(priv->base.uevent, 0); + nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr); + stat &= ~0x80000000; + } + + if (stat) { + nv_fatal(priv, "unhandled status 0x%08x\n", stat); + nv_wr32(priv, 0x002100, stat); + nv_wr32(priv, 0x002140, 0); + } +} + +static void +nvc0_fifo_uevent_enable(struct nouveau_event *event, int index) +{ + struct nvc0_fifo_priv *priv = event->priv; + nv_mask(priv, 0x002140, 0x80000000, 0x80000000); +} + +static void +nvc0_fifo_uevent_disable(struct nouveau_event *event, int index) +{ + struct nvc0_fifo_priv *priv = event->priv; + nv_mask(priv, 0x002140, 0x80000000, 0x00000000); +} + +static int +nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, + &priv->playlist[0]); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, + &priv->playlist[1]); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0, + &priv->user.mem); + if (ret) + return ret; + + ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, + &priv->user.bar); + if (ret) + return ret; + + priv->base.uevent->enable = nvc0_fifo_uevent_enable; + priv->base.uevent->disable = nvc0_fifo_uevent_disable; + priv->base.uevent->priv = priv; + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nvc0_fifo_intr; + nv_engine(priv)->cclass = &nvc0_fifo_cclass; + nv_engine(priv)->sclass = nvc0_fifo_sclass; + return 0; +} + +static void +nvc0_fifo_dtor(struct nouveau_object *object) +{ + struct nvc0_fifo_priv *priv = (void *)object; + + nouveau_gpuobj_unmap(&priv->user.bar); + 
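+ /* Teardown mirrors nvc0_fifo_ctor in reverse: the BAR mapping of the
+  * per-channel user area is dropped above, then the backing object and
+  * both playlist buffers are released below, before the base fifo is
+  * destroyed. */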
nouveau_gpuobj_ref(NULL, &priv->user.mem); + nouveau_gpuobj_ref(NULL, &priv->playlist[1]); + nouveau_gpuobj_ref(NULL, &priv->playlist[0]); + + nouveau_fifo_destroy(&priv->base); +} + +static int +nvc0_fifo_init(struct nouveau_object *object) +{ + struct nvc0_fifo_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_fifo_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x000204, 0xffffffff); + nv_wr32(priv, 0x002204, 0xffffffff); + + priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204)); + nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr); + + /* assign engines to subfifos */ + if (priv->spoon_nr >= 3) { + nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */ + nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */ + nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */ + nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */ + nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */ + nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */ + } + + /* PSUBFIFO[n] */ + for (i = 0; i < priv->spoon_nr; i++) { + nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); + nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ + nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ + } + + nv_mask(priv, 0x002200, 0x00000001, 0x00000001); + nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); + + nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */ + nv_wr32(priv, 0x002100, 0xffffffff); + nv_wr32(priv, 0x002140, 0x3fffffff); + nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */ + return 0; +} + +struct nouveau_oclass +nvc0_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_fifo_ctor, + .dtor = nvc0_fifo_dtor, + .init = nvc0_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c new file mode 100644 index 0000000..56192a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -0,0 +1,679 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/handle.h> +#include <core/namedb.h> +#include <core/gpuobj.h> +#include <core/engctx.h> +#include <core/event.h> +#include <core/class.h> +#include <core/math.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> +#include <subdev/vm.h> + +#include <engine/dmaobj.h> +#include <engine/fifo.h> + +#define _(a,b) { (a), ((1ULL << (a)) | (b)) } +static const struct { + u64 subdev; + u64 mask; +} fifo_engine[] = { + _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)), + _(NVDEV_ENGINE_VP , 0), + _(NVDEV_ENGINE_PPP , 0), + _(NVDEV_ENGINE_BSP , 0), + _(NVDEV_ENGINE_COPY0 , 0), + _(NVDEV_ENGINE_COPY1 , 0), + _(NVDEV_ENGINE_VENC , 0), +}; +#undef _ +#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine) + +struct nve0_fifo_engn { + struct nouveau_gpuobj *playlist[2]; + int cur_playlist; +}; + +struct nve0_fifo_priv { + struct nouveau_fifo base; + struct nve0_fifo_engn engine[FIFO_ENGINE_NR]; + struct { + struct nouveau_gpuobj *mem; + struct nouveau_vma bar; + } user; + int spoon_nr; +}; + +struct nve0_fifo_base { + struct nouveau_fifo_base base; + struct nouveau_gpuobj *pgd; + struct nouveau_vm *vm; +}; + +struct nve0_fifo_chan { + struct nouveau_fifo_chan base; + u32 engine; +}; + +/******************************************************************************* + * FIFO channel objects + ******************************************************************************/ + +static void +nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) +{ + struct nouveau_bar *bar = nouveau_bar(priv); + struct nve0_fifo_engn *engn = &priv->engine[engine]; + struct nouveau_gpuobj *cur; + u32 match = (engine << 16) | 0x00000001; + int i, p; + + mutex_lock(&nv_subdev(priv)->mutex); + cur = engn->playlist[engn->cur_playlist]; + if (unlikely(cur == NULL)) { + int ret = nouveau_gpuobj_new(nv_object(priv), NULL, + 0x8000, 0x1000, 0, &cur); + if (ret) { + mutex_unlock(&nv_subdev(priv)->mutex); + nv_error(priv, "playlist alloc failed\n"); + return; + } + + engn->playlist[engn->cur_playlist] = cur; + } + + engn->cur_playlist = !engn->cur_playlist; + + for (i = 0, p = 0; i < priv->base.max; i++) { + u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001; + if (ctrl != match) + continue; + nv_wo32(cur, p + 0, i); + nv_wo32(cur, p + 4, 0x00000000); + p += 8; + } + bar->flush(bar); + + nv_wr32(priv, 0x002270, cur->addr >> 12); + nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); + if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) + nv_error(priv, "playlist %d update timeout\n", engine); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +nve0_fifo_context_attach(struct nouveau_object *parent, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nve0_fifo_base *base = (void *)parent->parent; + struct nouveau_engctx *ectx = (void *)object; + u32 addr; + int ret; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : + case NVDEV_ENGINE_COPY0: + case NVDEV_ENGINE_COPY1: addr = 0x0210; break; + case NVDEV_ENGINE_BSP : addr = 0x0270; break; + case NVDEV_ENGINE_VP : addr = 0x0250; break; + case NVDEV_ENGINE_PPP : addr = 0x0260; break; + default: + return -EINVAL; + } + + if (!ectx->vma.node) { + ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm, + NV_MEM_ACCESS_RW, &ectx->vma); + if (ret) + return ret; + + nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12; + } + + nv_wo32(base, addr + 0x00, 
lower_32_bits(ectx->vma.offset) | 4); + nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset)); + bar->flush(bar); + return 0; +} + +static int +nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend, + struct nouveau_object *object) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nve0_fifo_priv *priv = (void *)parent->engine; + struct nve0_fifo_base *base = (void *)parent->parent; + struct nve0_fifo_chan *chan = (void *)parent; + u32 addr; + + switch (nv_engidx(object->engine)) { + case NVDEV_ENGINE_SW : return 0; + case NVDEV_ENGINE_GR : + case NVDEV_ENGINE_COPY0: + case NVDEV_ENGINE_COPY1: addr = 0x0210; break; + case NVDEV_ENGINE_BSP : addr = 0x0270; break; + case NVDEV_ENGINE_VP : addr = 0x0250; break; + case NVDEV_ENGINE_PPP : addr = 0x0260; break; + default: + return -EINVAL; + } + + nv_wr32(priv, 0x002634, chan->base.chid); + if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { + nv_error(priv, "channel %d [%s] kick timeout\n", + chan->base.chid, nouveau_client_name(chan)); + if (suspend) + return -EBUSY; + } + + nv_wo32(base, addr + 0x00, 0x00000000); + nv_wo32(base, addr + 0x04, 0x00000000); + bar->flush(bar); + return 0; +} + +static int +nve0_fifo_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nve0_fifo_priv *priv = (void *)engine; + struct nve0_fifo_base *base = (void *)parent; + struct nve0_fifo_chan *chan; + struct nve0_channel_ind_class *args = data; + u64 usermem, ioffset, ilength; + int ret, i; + + if (size < sizeof(*args)) + return -EINVAL; + + for (i = 0; i < FIFO_ENGINE_NR; i++) { + if (args->engine & (1 << i)) { + if (nouveau_engine(parent, fifo_engine[i].subdev)) { + args->engine = (1 << i); + break; + } + } + } + + if (i == FIFO_ENGINE_NR) + return -ENODEV; + + ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, + priv->user.bar.offset, 0x200, + args->pushbuf, + fifo_engine[i].mask, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_parent(chan)->context_attach = nve0_fifo_context_attach; + nv_parent(chan)->context_detach = nve0_fifo_context_detach; + chan->engine = i; + + usermem = chan->base.chid * 0x200; + ioffset = args->ioffset; + ilength = log2i(args->ilength / 8); + + for (i = 0; i < 0x200; i += 4) + nv_wo32(priv->user.mem, usermem + i, 0x00000000); + + nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem)); + nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem)); + nv_wo32(base, 0x10, 0x0000face); + nv_wo32(base, 0x30, 0xfffff902); + nv_wo32(base, 0x48, lower_32_bits(ioffset)); + nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16)); + nv_wo32(base, 0x84, 0x20400000); + nv_wo32(base, 0x94, 0x30000001); + nv_wo32(base, 0x9c, 0x00000100); + nv_wo32(base, 0xac, 0x0000001f); + nv_wo32(base, 0xe8, chan->base.chid); + nv_wo32(base, 0xb8, 0xf8000000); + nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */ + nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */ + bar->flush(bar); + return 0; +} + +static int +nve0_fifo_chan_init(struct nouveau_object *object) +{ + struct nouveau_gpuobj *base = nv_gpuobj(object->parent); + struct nve0_fifo_priv *priv = (void *)object->engine; + struct nve0_fifo_chan *chan = (void *)object; + u32 chid = chan->base.chid; + int ret; + + ret = nouveau_fifo_channel_init(&chan->base); + if (ret) + return ret; + + nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 
16); + nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); + nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); + nve0_fifo_playlist_update(priv, chan->engine); + nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); + return 0; +} + +static int +nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend) +{ + struct nve0_fifo_priv *priv = (void *)object->engine; + struct nve0_fifo_chan *chan = (void *)object; + u32 chid = chan->base.chid; + + nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); + nve0_fifo_playlist_update(priv, chan->engine); + nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); + + return nouveau_fifo_channel_fini(&chan->base, suspend); +} + +static struct nouveau_ofuncs +nve0_fifo_ofuncs = { + .ctor = nve0_fifo_chan_ctor, + .dtor = _nouveau_fifo_channel_dtor, + .init = nve0_fifo_chan_init, + .fini = nve0_fifo_chan_fini, + .rd32 = _nouveau_fifo_channel_rd32, + .wr32 = _nouveau_fifo_channel_wr32, +}; + +static struct nouveau_oclass +nve0_fifo_sclass[] = { + { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs }, + {} +}; + +/******************************************************************************* + * FIFO context - instmem heap and vm setup + ******************************************************************************/ + +static int +nve0_fifo_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_fifo_base *base; + int ret; + + ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base); + *pobject = nv_object(base); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0, + &base->pgd); + if (ret) + return ret; + + nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr)); + nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr)); + nv_wo32(base, 0x0208, 0xffffffff); + nv_wo32(base, 0x020c, 0x000000ff); + + ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd); + if (ret) + return ret; + + return 0; +} + +static void +nve0_fifo_context_dtor(struct nouveau_object *object) +{ + struct nve0_fifo_base *base = (void *)object; + nouveau_vm_ref(NULL, &base->vm, base->pgd); + nouveau_gpuobj_ref(NULL, &base->pgd); + nouveau_fifo_context_destroy(&base->base); +} + +static struct nouveau_oclass +nve0_fifo_cclass = { + .handle = NV_ENGCTX(FIFO, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_fifo_context_ctor, + .dtor = nve0_fifo_context_dtor, + .init = _nouveau_fifo_context_init, + .fini = _nouveau_fifo_context_fini, + .rd32 = _nouveau_fifo_context_rd32, + .wr32 = _nouveau_fifo_context_wr32, + }, +}; + +/******************************************************************************* + * PFIFO engine + ******************************************************************************/ + +static const struct nouveau_enum nve0_fifo_fault_unit[] = { + {} +}; + +static const struct nouveau_enum nve0_fifo_fault_reason[] = { + { 0x00, "PT_NOT_PRESENT" }, + { 0x01, "PT_TOO_SHORT" }, + { 0x02, "PAGE_NOT_PRESENT" }, + { 0x03, "VM_LIMIT_EXCEEDED" }, + { 0x04, "NO_CHANNEL" }, + { 0x05, "PAGE_SYSTEM_ONLY" }, + { 0x06, "PAGE_READ_ONLY" }, + { 0x0a, "COMPRESSED_SYSRAM" }, + { 0x0c, "INVALID_STORAGE_TYPE" }, + {} +}; + +static const struct nouveau_enum nve0_fifo_fault_hubclient[] = { + {} +}; + +static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = { + {} +}; + +static const struct nouveau_bitfield 
nve0_fifo_subfifo_intr[] = { + { 0x00200000, "ILLEGAL_MTHD" }, + { 0x00800000, "EMPTY_SUBC" }, + {} +}; + +static void +nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit) +{ + u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10)); + u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10)); + u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10)); + u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10)); + u32 client = (stat & 0x00001f00) >> 8; + const struct nouveau_enum *en; + struct nouveau_engine *engine; + struct nouveau_object *engctx = NULL; + + nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ? + "write" : "read", (u64)vahi << 32 | valo); + nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f); + pr_cont("] from "); + en = nouveau_enum_print(nve0_fifo_fault_unit, unit); + if (stat & 0x00000040) { + pr_cont("/"); + nouveau_enum_print(nve0_fifo_fault_hubclient, client); + } else { + pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24); + nouveau_enum_print(nve0_fifo_fault_gpcclient, client); + } + + if (en && en->data2) { + engine = nouveau_engine(priv, en->data2); + if (engine) + engctx = nouveau_engctx_get(engine, inst); + + } + + pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12, + nouveau_client_name(engctx)); + + nouveau_engctx_put(engctx); +} + +static int +nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data) +{ + struct nve0_fifo_chan *chan = NULL; + struct nouveau_handle *bind; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&priv->base.lock, flags); + if (likely(chid >= priv->base.min && chid <= priv->base.max)) + chan = (void *)priv->base.channel[chid]; + if (unlikely(!chan)) + goto out; + + bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e); + if (likely(bind)) { + if (!mthd || !nv_call(bind->object, mthd, data)) + ret = 0; + nouveau_namedb_put(bind); + } + +out: + spin_unlock_irqrestore(&priv->base.lock, flags); + return ret; +} + +static void +nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit) +{ + u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); + u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); + u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); + u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00003ffc); + u32 show = stat; + + if (stat & 0x00200000) { + if (mthd == 0x0054) { + if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000)) + show &= ~0x00200000; + } + } + + if (stat & 0x00800000) { + if (!nve0_fifo_swmthd(priv, chid, mthd, data)) + show &= ~0x00800000; + } + + if (show) { + nv_error(priv, "SUBFIFO%d:", unit); + nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); + pr_cont("\n"); + nv_error(priv, + "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, + nouveau_client_name_for_fifo_chid(&priv->base, chid), + subc, mthd, data); + } + + nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); + nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); +} + +static void +nve0_fifo_intr(struct nouveau_subdev *subdev) +{ + struct nve0_fifo_priv *priv = (void *)subdev; + u32 mask = nv_rd32(priv, 0x002140); + u32 stat = nv_rd32(priv, 0x002100) & mask; + + if (stat & 0x00000100) { + nv_warn(priv, "unknown status 0x00000100\n"); + nv_wr32(priv, 0x002100, 0x00000100); + stat &= ~0x00000100; + } + + if (stat & 0x10000000) { + u32 units = nv_rd32(priv, 0x00259c); + u32 u = units; + + while (u) { + int i = ffs(u) - 1; + nve0_fifo_isr_vm_fault(priv, i); + u &= ~(1 << i); + } + + nv_wr32(priv, 0x00259c, 
units); + stat &= ~0x10000000; + } + + if (stat & 0x20000000) { + u32 units = nv_rd32(priv, 0x0025a0); + u32 u = units; + + while (u) { + int i = ffs(u) - 1; + nve0_fifo_isr_subfifo_intr(priv, i); + u &= ~(1 << i); + } + + nv_wr32(priv, 0x0025a0, units); + stat &= ~0x20000000; + } + + if (stat & 0x40000000) { + nv_warn(priv, "unknown status 0x40000000\n"); + nv_mask(priv, 0x002a00, 0x00000000, 0x00000000); + stat &= ~0x40000000; + } + + if (stat & 0x80000000) { + nouveau_event_trigger(priv->base.uevent, 0); + nv_wr32(priv, 0x002100, 0x80000000); + stat &= ~0x80000000; + } + + if (stat) { + nv_fatal(priv, "unhandled status 0x%08x\n", stat); + nv_wr32(priv, 0x002100, stat); + nv_wr32(priv, 0x002140, 0); + } +} + +static void +nve0_fifo_uevent_enable(struct nouveau_event *event, int index) +{ + struct nve0_fifo_priv *priv = event->priv; + nv_mask(priv, 0x002140, 0x80000000, 0x80000000); +} + +static void +nve0_fifo_uevent_disable(struct nouveau_event *event, int index) +{ + struct nve0_fifo_priv *priv = event->priv; + nv_mask(priv, 0x002140, 0x80000000, 0x00000000); +} + +static int +nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_fifo_priv *priv; + int ret; + + ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); + if (ret) + return ret; + + ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, + &priv->user.bar); + if (ret) + return ret; + + priv->base.uevent->enable = nve0_fifo_uevent_enable; + priv->base.uevent->disable = nve0_fifo_uevent_disable; + priv->base.uevent->priv = priv; + + nv_subdev(priv)->unit = 0x00000100; + nv_subdev(priv)->intr = nve0_fifo_intr; + nv_engine(priv)->cclass = &nve0_fifo_cclass; + nv_engine(priv)->sclass = nve0_fifo_sclass; + return 0; +} + +static void +nve0_fifo_dtor(struct nouveau_object *object) +{ + struct nve0_fifo_priv *priv = (void *)object; + int i; + + nouveau_gpuobj_unmap(&priv->user.bar); + nouveau_gpuobj_ref(NULL, &priv->user.mem); + + for (i = 0; i < ARRAY_SIZE(priv->engine); i++) { + nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]); + nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]); + } + + nouveau_fifo_destroy(&priv->base); +} + +static int +nve0_fifo_init(struct nouveau_object *object) +{ + struct nve0_fifo_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_fifo_init(&priv->base); + if (ret) + return ret; + + /* enable all available PSUBFIFOs */ + nv_wr32(priv, 0x000204, 0xffffffff); + priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); + nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr); + + /* PSUBFIFO[n] */ + for (i = 0; i < priv->spoon_nr; i++) { + nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); + nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ + nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ + } + + nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); + + nv_wr32(priv, 0x002a00, 0xffffffff); + nv_wr32(priv, 0x002100, 0xffffffff); + nv_wr32(priv, 0x002140, 0x3fffffff); + return 0; +} + +struct nouveau_oclass +nve0_fifo_oclass = { + .handle = NV_ENGINE(FIFO, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_fifo_ctor, + .dtor = nve0_fifo_dtor, + .init = nve0_fifo_init, + .fini = _nouveau_fifo_fini, + }, +}; diff --git 
a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h new file mode 100644 index 0000000..e194701 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h @@ -0,0 +1,129 @@ +#ifndef __NOUVEAU_GRCTX_H__ +#define __NOUVEAU_GRCTX_H__ + +struct nouveau_grctx { + struct nouveau_device *device; + + enum { + NOUVEAU_GRCTX_PROG, + NOUVEAU_GRCTX_VALS + } mode; + void *data; + + u32 ctxprog_max; + u32 ctxprog_len; + u32 ctxprog_reg; + int ctxprog_label[32]; + u32 ctxvals_pos; + u32 ctxvals_base; +}; + +static inline void +cp_out(struct nouveau_grctx *ctx, u32 inst) +{ + u32 *ctxprog = ctx->data; + + if (ctx->mode != NOUVEAU_GRCTX_PROG) + return; + + BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max); + ctxprog[ctx->ctxprog_len++] = inst; +} + +static inline void +cp_lsr(struct nouveau_grctx *ctx, u32 val) +{ + cp_out(ctx, CP_LOAD_SR | val); +} + +static inline void +cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length) +{ + ctx->ctxprog_reg = (reg - 0x00400000) >> 2; + + ctx->ctxvals_base = ctx->ctxvals_pos; + ctx->ctxvals_pos = ctx->ctxvals_base + length; + + if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) { + cp_lsr(ctx, length); + length = 0; + } + + cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg); +} + +static inline void +cp_name(struct nouveau_grctx *ctx, int name) +{ + u32 *ctxprog = ctx->data; + int i; + + if (ctx->mode != NOUVEAU_GRCTX_PROG) + return; + + ctx->ctxprog_label[name] = ctx->ctxprog_len; + for (i = 0; i < ctx->ctxprog_len; i++) { + if ((ctxprog[i] & 0xfff00000) != 0xff400000) + continue; + if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT)) + continue; + ctxprog[i] = (ctxprog[i] & 0x00ff00ff) | + (ctx->ctxprog_len << CP_BRA_IP_SHIFT); + } +} + +static inline void +_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name) +{ + int ip = 0; + + if (mod != 2) { + ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT; + if (ip == 0) + ip = 0xff000000 | (name << CP_BRA_IP_SHIFT); + } + + cp_out(ctx, CP_BRA | (mod << 18) | ip | flag | + (state ? 0 : CP_BRA_IF_CLEAR)); +} +#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) +#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) +#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) + +static inline void +_cp_wait(struct nouveau_grctx *ctx, int flag, int state) +{ + cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0)); +} +#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s) + +static inline void +_cp_set(struct nouveau_grctx *ctx, int flag, int state) +{ + cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0)); +} +#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s) + +static inline void +cp_pos(struct nouveau_grctx *ctx, int offset) +{ + ctx->ctxvals_pos = offset; + ctx->ctxvals_base = ctx->ctxvals_pos; + + cp_lsr(ctx, ctx->ctxvals_pos); + cp_out(ctx, CP_SET_CONTEXT_POINTER); +} + +static inline void +gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val) +{ + if (ctx->mode != NOUVEAU_GRCTX_VALS) + return; + + reg = (reg - 0x00400000) / 4; + reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base; + + nv_wo32(ctx->data, reg * 4, val); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c new file mode 100644 index 0000000..7bbb1e1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c @@ -0,0 +1,695 @@ +/* + * Copyright 2009 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> + +/* NVIDIA context programs handle a number of other conditions which are + * not implemented in our versions. It's not clear why NVIDIA context + * programs have this code, nor whether it's strictly necessary for + * correct operation. We'll implement additional handling if/when we + * discover it's necessary. + * + * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state" + * flag is set, this gets saved into the context. + * - On context save, the context program for all cards load nsource + * into a flag register and check for ILLEGAL_MTHD. If it's set, + * opcode 0x60000d is called before resuming normal operation. + * - Some context programs check more conditions than the above. NV44 + * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001)) + * and calls 0x60000d before resuming normal operation. + * - At the very beginning of NVIDIA's context programs, flag 9 is checked + * and if true 0x800001 is called with count=0, pos=0, the flag is cleared + * and then the ctxprog is aborted. It looks like a complicated NOP, + * its purpose is unknown. + * - In the section of code that loads the per-vs state, NVIDIA check + * flag 10. If it's set, they only transfer the small 0x300 byte block + * of state + the state for a single vs as opposed to the state for + * all vs units. It doesn't seem likely that it'll occur in normal + * operation, especially seeing as it appears NVIDIA may have screwed + * up the ctxprogs for some cards and have an invalid instruction + * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction. + * - There's a number of places where context offset 0 (where we place + * the PRAMIN offset of the context) is loaded into either 0x408000, + * 0x408004 or 0x408008. Not sure what's up there either. + * - The ctxprogs for some cards save 0x400a00 again during the cleanup + * path for auto-loadctx. 
+ */ + +#define CP_FLAG_CLEAR 0 +#define CP_FLAG_SET 1 +#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) +#define CP_FLAG_SWAP_DIRECTION_LOAD 0 +#define CP_FLAG_SWAP_DIRECTION_SAVE 1 +#define CP_FLAG_USER_SAVE ((0 * 32) + 5) +#define CP_FLAG_USER_SAVE_NOT_PENDING 0 +#define CP_FLAG_USER_SAVE_PENDING 1 +#define CP_FLAG_USER_LOAD ((0 * 32) + 6) +#define CP_FLAG_USER_LOAD_NOT_PENDING 0 +#define CP_FLAG_USER_LOAD_PENDING 1 +#define CP_FLAG_STATUS ((3 * 32) + 0) +#define CP_FLAG_STATUS_IDLE 0 +#define CP_FLAG_STATUS_BUSY 1 +#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4) +#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0 +#define CP_FLAG_AUTO_SAVE_PENDING 1 +#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5) +#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 +#define CP_FLAG_AUTO_LOAD_PENDING 1 +#define CP_FLAG_UNK54 ((3 * 32) + 6) +#define CP_FLAG_UNK54_CLEAR 0 +#define CP_FLAG_UNK54_SET 1 +#define CP_FLAG_ALWAYS ((3 * 32) + 8) +#define CP_FLAG_ALWAYS_FALSE 0 +#define CP_FLAG_ALWAYS_TRUE 1 +#define CP_FLAG_UNK57 ((3 * 32) + 9) +#define CP_FLAG_UNK57_CLEAR 0 +#define CP_FLAG_UNK57_SET 1 + +#define CP_CTX 0x00100000 +#define CP_CTX_COUNT 0x000fc000 +#define CP_CTX_COUNT_SHIFT 14 +#define CP_CTX_REG 0x00003fff +#define CP_LOAD_SR 0x00200000 +#define CP_LOAD_SR_VALUE 0x000fffff +#define CP_BRA 0x00400000 +#define CP_BRA_IP 0x0000ff00 +#define CP_BRA_IP_SHIFT 8 +#define CP_BRA_IF_CLEAR 0x00000080 +#define CP_BRA_FLAG 0x0000007f +#define CP_WAIT 0x00500000 +#define CP_WAIT_SET 0x00000080 +#define CP_WAIT_FLAG 0x0000007f +#define CP_SET 0x00700000 +#define CP_SET_1 0x00000080 +#define CP_SET_FLAG 0x0000007f +#define CP_NEXT_TO_SWAP 0x00600007 +#define CP_NEXT_TO_CURRENT 0x00600009 +#define CP_SET_CONTEXT_POINTER 0x0060000a +#define CP_END 0x0060000e +#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */ +#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */ +#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */ + +#include "nv40.h" +#include "ctx.h" + +/* TODO: + * - get vs count from 0x1540 + */ + +static int +nv40_graph_vs_count(struct nouveau_device *device) +{ + + switch (device->chipset) { + case 0x47: + case 0x49: + case 0x4b: + return 8; + case 0x40: + return 6; + case 0x41: + case 0x42: + return 5; + case 0x43: + case 0x44: + case 0x46: + case 0x4a: + return 3; + case 0x4c: + case 0x4e: + case 0x67: + default: + return 1; + } +} + + +enum cp_label { + cp_check_load = 1, + cp_setup_auto_load, + cp_setup_load, + cp_setup_save, + cp_swap_state, + cp_swap_state3d_3_is_save, + cp_prepare_exit, + cp_exit, +}; + +static void +nv40_graph_construct_general(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + + cp_ctx(ctx, 0x4000a4, 1); + gr_def(ctx, 0x4000a4, 0x00000008); + cp_ctx(ctx, 0x400144, 58); + gr_def(ctx, 0x400144, 0x00000001); + cp_ctx(ctx, 0x400314, 1); + gr_def(ctx, 0x400314, 0x00000000); + cp_ctx(ctx, 0x400400, 10); + cp_ctx(ctx, 0x400480, 10); + cp_ctx(ctx, 0x400500, 19); + gr_def(ctx, 0x400514, 0x00040000); + gr_def(ctx, 0x400524, 0x55555555); + gr_def(ctx, 0x400528, 0x55555555); + gr_def(ctx, 0x40052c, 0x55555555); + gr_def(ctx, 0x400530, 0x55555555); + cp_ctx(ctx, 0x400560, 6); + gr_def(ctx, 0x400568, 0x0000ffff); + gr_def(ctx, 0x40056c, 0x0000ffff); + cp_ctx(ctx, 0x40057c, 5); + cp_ctx(ctx, 0x400710, 3); + gr_def(ctx, 0x400710, 0x20010001); + gr_def(ctx, 0x400714, 0x0f73ef00); + cp_ctx(ctx, 0x400724, 1); + gr_def(ctx, 0x400724, 0x02008821); + cp_ctx(ctx, 0x400770, 3); + if (device->chipset == 0x40) { + cp_ctx(ctx, 0x400814, 4); + cp_ctx(ctx, 0x400828, 5); + 
cp_ctx(ctx, 0x400840, 5); + gr_def(ctx, 0x400850, 0x00000040); + cp_ctx(ctx, 0x400858, 4); + gr_def(ctx, 0x400858, 0x00000040); + gr_def(ctx, 0x40085c, 0x00000040); + gr_def(ctx, 0x400864, 0x80000000); + cp_ctx(ctx, 0x40086c, 9); + gr_def(ctx, 0x40086c, 0x80000000); + gr_def(ctx, 0x400870, 0x80000000); + gr_def(ctx, 0x400874, 0x80000000); + gr_def(ctx, 0x400878, 0x80000000); + gr_def(ctx, 0x400888, 0x00000040); + gr_def(ctx, 0x40088c, 0x80000000); + cp_ctx(ctx, 0x4009c0, 8); + gr_def(ctx, 0x4009cc, 0x80000000); + gr_def(ctx, 0x4009dc, 0x80000000); + } else { + cp_ctx(ctx, 0x400840, 20); + if (nv44_graph_class(ctx->device)) { + for (i = 0; i < 8; i++) + gr_def(ctx, 0x400860 + (i * 4), 0x00000001); + } + gr_def(ctx, 0x400880, 0x00000040); + gr_def(ctx, 0x400884, 0x00000040); + gr_def(ctx, 0x400888, 0x00000040); + cp_ctx(ctx, 0x400894, 11); + gr_def(ctx, 0x400894, 0x00000040); + if (!nv44_graph_class(ctx->device)) { + for (i = 0; i < 8; i++) + gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); + } + cp_ctx(ctx, 0x4008e0, 2); + cp_ctx(ctx, 0x4008f8, 2); + if (device->chipset == 0x4c || + (device->chipset & 0xf0) == 0x60) + cp_ctx(ctx, 0x4009f8, 1); + } + cp_ctx(ctx, 0x400a00, 73); + gr_def(ctx, 0x400b0c, 0x0b0b0b0c); + cp_ctx(ctx, 0x401000, 4); + cp_ctx(ctx, 0x405004, 1); + switch (device->chipset) { + case 0x47: + case 0x49: + case 0x4b: + cp_ctx(ctx, 0x403448, 1); + gr_def(ctx, 0x403448, 0x00001010); + break; + default: + cp_ctx(ctx, 0x403440, 1); + switch (device->chipset) { + case 0x40: + gr_def(ctx, 0x403440, 0x00000010); + break; + case 0x44: + case 0x46: + case 0x4a: + gr_def(ctx, 0x403440, 0x00003010); + break; + case 0x41: + case 0x42: + case 0x43: + case 0x4c: + case 0x4e: + case 0x67: + default: + gr_def(ctx, 0x403440, 0x00001010); + break; + } + break; + } +} + +static void +nv40_graph_construct_state3d(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + + if (device->chipset == 0x40) { + cp_ctx(ctx, 0x401880, 51); + gr_def(ctx, 0x401940, 0x00000100); + } else + if (device->chipset == 0x46 || device->chipset == 0x47 || + device->chipset == 0x49 || device->chipset == 0x4b) { + cp_ctx(ctx, 0x401880, 32); + for (i = 0; i < 16; i++) + gr_def(ctx, 0x401880 + (i * 4), 0x00000111); + if (device->chipset == 0x46) + cp_ctx(ctx, 0x401900, 16); + cp_ctx(ctx, 0x401940, 3); + } + cp_ctx(ctx, 0x40194c, 18); + gr_def(ctx, 0x401954, 0x00000111); + gr_def(ctx, 0x401958, 0x00080060); + gr_def(ctx, 0x401974, 0x00000080); + gr_def(ctx, 0x401978, 0xffff0000); + gr_def(ctx, 0x40197c, 0x00000001); + gr_def(ctx, 0x401990, 0x46400000); + if (device->chipset == 0x40) { + cp_ctx(ctx, 0x4019a0, 2); + cp_ctx(ctx, 0x4019ac, 5); + } else { + cp_ctx(ctx, 0x4019a0, 1); + cp_ctx(ctx, 0x4019b4, 3); + } + gr_def(ctx, 0x4019bc, 0xffff0000); + switch (device->chipset) { + case 0x46: + case 0x47: + case 0x49: + case 0x4b: + cp_ctx(ctx, 0x4019c0, 18); + for (i = 0; i < 16; i++) + gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888); + break; + } + cp_ctx(ctx, 0x401a08, 8); + gr_def(ctx, 0x401a10, 0x0fff0000); + gr_def(ctx, 0x401a14, 0x0fff0000); + gr_def(ctx, 0x401a1c, 0x00011100); + cp_ctx(ctx, 0x401a2c, 4); + cp_ctx(ctx, 0x401a44, 26); + for (i = 0; i < 16; i++) + gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000); + gr_def(ctx, 0x401a8c, 0x4b7fffff); + if (device->chipset == 0x40) { + cp_ctx(ctx, 0x401ab8, 3); + } else { + cp_ctx(ctx, 0x401ab8, 1); + cp_ctx(ctx, 0x401ac0, 1); + } + cp_ctx(ctx, 0x401ad0, 8); + gr_def(ctx, 0x401ad0, 0x30201000); + gr_def(ctx, 0x401ad4, 0x70605040); + gr_def(ctx, 0x401ad8, 
0xb8a89888); + gr_def(ctx, 0x401adc, 0xf8e8d8c8); + cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1); + gr_def(ctx, 0x401b10, 0x40100000); + cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5); + gr_def(ctx, 0x401b28, device->chipset == 0x40 ? + 0x00000004 : 0x00000000); + cp_ctx(ctx, 0x401b30, 25); + gr_def(ctx, 0x401b34, 0x0000ffff); + gr_def(ctx, 0x401b68, 0x435185d6); + gr_def(ctx, 0x401b6c, 0x2155b699); + gr_def(ctx, 0x401b70, 0xfedcba98); + gr_def(ctx, 0x401b74, 0x00000098); + gr_def(ctx, 0x401b84, 0xffffffff); + gr_def(ctx, 0x401b88, 0x00ff7000); + gr_def(ctx, 0x401b8c, 0x0000ffff); + if (device->chipset != 0x44 && device->chipset != 0x4a && + device->chipset != 0x4e) + cp_ctx(ctx, 0x401b94, 1); + cp_ctx(ctx, 0x401b98, 8); + gr_def(ctx, 0x401b9c, 0x00ff0000); + cp_ctx(ctx, 0x401bc0, 9); + gr_def(ctx, 0x401be0, 0x00ffff00); + cp_ctx(ctx, 0x401c00, 192); + for (i = 0; i < 16; i++) { /* fragment texture units */ + gr_def(ctx, 0x401c40 + (i * 4), 0x00018488); + gr_def(ctx, 0x401c80 + (i * 4), 0x00028202); + gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4); + gr_def(ctx, 0x401d40 + (i * 4), 0x01012000); + gr_def(ctx, 0x401d80 + (i * 4), 0x00080008); + gr_def(ctx, 0x401e00 + (i * 4), 0x00100008); + } + for (i = 0; i < 4; i++) { /* vertex texture units */ + gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80); + gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202); + gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008); + gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008); + } + cp_ctx(ctx, 0x400f5c, 3); + gr_def(ctx, 0x400f5c, 0x00000002); + cp_ctx(ctx, 0x400f84, 1); +} + +static void +nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + + cp_ctx(ctx, 0x402000, 1); + cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2); + switch (device->chipset) { + case 0x40: + gr_def(ctx, 0x402404, 0x00000001); + break; + case 0x4c: + case 0x4e: + case 0x67: + gr_def(ctx, 0x402404, 0x00000020); + break; + case 0x46: + case 0x49: + case 0x4b: + gr_def(ctx, 0x402404, 0x00000421); + break; + default: + gr_def(ctx, 0x402404, 0x00000021); + } + if (device->chipset != 0x40) + gr_def(ctx, 0x402408, 0x030c30c3); + switch (device->chipset) { + case 0x44: + case 0x46: + case 0x4a: + case 0x4c: + case 0x4e: + case 0x67: + cp_ctx(ctx, 0x402440, 1); + gr_def(ctx, 0x402440, 0x00011001); + break; + default: + break; + } + cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9); + gr_def(ctx, 0x402488, 0x3e020200); + gr_def(ctx, 0x40248c, 0x00ffffff); + switch (device->chipset) { + case 0x40: + gr_def(ctx, 0x402490, 0x60103f00); + break; + case 0x47: + gr_def(ctx, 0x402490, 0x40103f00); + break; + case 0x41: + case 0x42: + case 0x49: + case 0x4b: + gr_def(ctx, 0x402490, 0x20103f00); + break; + default: + gr_def(ctx, 0x402490, 0x0c103f00); + break; + } + gr_def(ctx, 0x40249c, device->chipset <= 0x43 ? + 0x00020000 : 0x00040000); + cp_ctx(ctx, 0x402500, 31); + gr_def(ctx, 0x402530, 0x00008100); + if (device->chipset == 0x40) + cp_ctx(ctx, 0x40257c, 6); + cp_ctx(ctx, 0x402594, 16); + cp_ctx(ctx, 0x402800, 17); + gr_def(ctx, 0x402800, 0x00000001); + switch (device->chipset) { + case 0x47: + case 0x49: + case 0x4b: + cp_ctx(ctx, 0x402864, 1); + gr_def(ctx, 0x402864, 0x00001001); + cp_ctx(ctx, 0x402870, 3); + gr_def(ctx, 0x402878, 0x00000003); + if (device->chipset != 0x47) { /* belong at end!! 
*/ + cp_ctx(ctx, 0x402900, 1); + cp_ctx(ctx, 0x402940, 1); + cp_ctx(ctx, 0x402980, 1); + cp_ctx(ctx, 0x4029c0, 1); + cp_ctx(ctx, 0x402a00, 1); + cp_ctx(ctx, 0x402a40, 1); + cp_ctx(ctx, 0x402a80, 1); + cp_ctx(ctx, 0x402ac0, 1); + } + break; + case 0x40: + cp_ctx(ctx, 0x402844, 1); + gr_def(ctx, 0x402844, 0x00000001); + cp_ctx(ctx, 0x402850, 1); + break; + default: + cp_ctx(ctx, 0x402844, 1); + gr_def(ctx, 0x402844, 0x00001001); + cp_ctx(ctx, 0x402850, 2); + gr_def(ctx, 0x402854, 0x00000003); + break; + } + + cp_ctx(ctx, 0x402c00, 4); + gr_def(ctx, 0x402c00, device->chipset == 0x40 ? + 0x80800001 : 0x00888001); + switch (device->chipset) { + case 0x47: + case 0x49: + case 0x4b: + cp_ctx(ctx, 0x402c20, 40); + for (i = 0; i < 32; i++) + gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff); + cp_ctx(ctx, 0x4030b8, 13); + gr_def(ctx, 0x4030dc, 0x00000005); + gr_def(ctx, 0x4030e8, 0x0000ffff); + break; + default: + cp_ctx(ctx, 0x402c10, 4); + if (device->chipset == 0x40) + cp_ctx(ctx, 0x402c20, 36); + else + if (device->chipset <= 0x42) + cp_ctx(ctx, 0x402c20, 24); + else + if (device->chipset <= 0x4a) + cp_ctx(ctx, 0x402c20, 16); + else + cp_ctx(ctx, 0x402c20, 8); + cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13); + gr_def(ctx, 0x402cd4, 0x00000005); + if (device->chipset != 0x40) + gr_def(ctx, 0x402ce0, 0x0000ffff); + break; + } + + cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3); + cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3); + cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device)); + for (i = 0; i < nv40_graph_vs_count(ctx->device); i++) + gr_def(ctx, 0x403420 + (i * 4), 0x00005555); + + if (device->chipset != 0x40) { + cp_ctx(ctx, 0x403600, 1); + gr_def(ctx, 0x403600, 0x00000001); + } + cp_ctx(ctx, 0x403800, 1); + + cp_ctx(ctx, 0x403c18, 1); + gr_def(ctx, 0x403c18, 0x00000001); + switch (device->chipset) { + case 0x46: + case 0x47: + case 0x49: + case 0x4b: + cp_ctx(ctx, 0x405018, 1); + gr_def(ctx, 0x405018, 0x08e00001); + cp_ctx(ctx, 0x405c24, 1); + gr_def(ctx, 0x405c24, 0x000e3000); + break; + } + if (device->chipset != 0x4e) + cp_ctx(ctx, 0x405800, 11); + cp_ctx(ctx, 0x407000, 1); +} + +static void +nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) +{ + int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684; + + cp_out (ctx, 0x300000); + cp_lsr (ctx, len - 4); + cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save); + cp_lsr (ctx, len); + cp_name(ctx, cp_swap_state3d_3_is_save); + cp_out (ctx, 0x800001); + + ctx->ctxvals_pos += len; +} + +static void +nv40_graph_construct_shader(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + struct nouveau_gpuobj *obj = ctx->data; + int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset; + int offset, i; + + vs_nr = nv40_graph_vs_count(ctx->device); + vs_nr_b0 = 363; + vs_nr_b1 = device->chipset == 0x40 ? 128 : 64; + if (device->chipset == 0x40) { + b0_offset = 0x2200/4; /* 33a0 */ + b1_offset = 0x55a0/4; /* 1500 */ + vs_len = 0x6aa0/4; + } else + if (device->chipset == 0x41 || device->chipset == 0x42) { + b0_offset = 0x2200/4; /* 2200 */ + b1_offset = 0x4400/4; /* 0b00 */ + vs_len = 0x4f00/4; + } else { + b0_offset = 0x1d40/4; /* 2200 */ + b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ + vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4; + } + + cp_lsr(ctx, vs_len * vs_nr + 0x300/4); + cp_out(ctx, nv44_graph_class(device) ? 
0x800029 : 0x800041); + + offset = ctx->ctxvals_pos; + ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); + + if (ctx->mode != NOUVEAU_GRCTX_VALS) + return; + + offset += 0x0280/4; + for (i = 0; i < 16; i++, offset += 2) + nv_wo32(obj, offset * 4, 0x3f800000); + + for (vs = 0; vs < vs_nr; vs++, offset += vs_len) { + for (i = 0; i < vs_nr_b0 * 6; i += 6) + nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001); + for (i = 0; i < vs_nr_b1 * 4; i += 4) + nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000); + } +} + +static void +nv40_grctx_generate(struct nouveau_grctx *ctx) +{ + /* decide whether we're loading/unloading the context */ + cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); + cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); + + cp_name(ctx, cp_check_load); + cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); + cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); + cp_bra (ctx, ALWAYS, TRUE, cp_exit); + + /* setup for context load */ + cp_name(ctx, cp_setup_auto_load); + cp_wait(ctx, STATUS, IDLE); + cp_out (ctx, CP_NEXT_TO_SWAP); + cp_name(ctx, cp_setup_load); + cp_wait(ctx, STATUS, IDLE); + cp_set (ctx, SWAP_DIRECTION, LOAD); + cp_out (ctx, 0x00910880); /* ?? */ + cp_out (ctx, 0x00901ffe); /* ?? */ + cp_out (ctx, 0x01940000); /* ?? */ + cp_lsr (ctx, 0x20); + cp_out (ctx, 0x0060000b); /* ?? */ + cp_wait(ctx, UNK57, CLEAR); + cp_out (ctx, 0x0060000c); /* ?? */ + cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); + + /* setup for context save */ + cp_name(ctx, cp_setup_save); + cp_set (ctx, SWAP_DIRECTION, SAVE); + + /* general PGRAPH state */ + cp_name(ctx, cp_swap_state); + cp_pos (ctx, 0x00020/4); + nv40_graph_construct_general(ctx); + cp_wait(ctx, STATUS, IDLE); + + /* 3D state, block 1 */ + cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit); + nv40_graph_construct_state3d(ctx); + cp_wait(ctx, STATUS, IDLE); + + /* 3D state, block 2 */ + nv40_graph_construct_state3d_2(ctx); + + /* Some other block of "random" state */ + nv40_graph_construct_state3d_3(ctx); + + /* Per-vertex shader state */ + cp_pos (ctx, ctx->ctxvals_pos); + nv40_graph_construct_shader(ctx); + + /* pre-exit state updates */ + cp_name(ctx, cp_prepare_exit); + cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); + cp_bra (ctx, USER_SAVE, PENDING, cp_exit); + cp_out (ctx, CP_NEXT_TO_CURRENT); + + cp_name(ctx, cp_exit); + cp_set (ctx, USER_SAVE, NOT_PENDING); + cp_set (ctx, USER_LOAD, NOT_PENDING); + cp_out (ctx, CP_END); +} + +void +nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem) +{ + nv40_grctx_generate(&(struct nouveau_grctx) { + .device = device, + .mode = NOUVEAU_GRCTX_VALS, + .data = mem, + }); +} + +int +nv40_grctx_init(struct nouveau_device *device, u32 *size) +{ + u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i; + struct nouveau_grctx ctx = { + .device = device, + .mode = NOUVEAU_GRCTX_PROG, + .data = ctxprog, + .ctxprog_max = 256, + }; + + if (!ctxprog) + return -ENOMEM; + + nv40_grctx_generate(&ctx); + + nv_wr32(device, 0x400324, 0); + for (i = 0; i < ctx.ctxprog_len; i++) + nv_wr32(device, 0x400328, ctxprog[i]); + *size = ctx.ctxvals_pos * 4; + + kfree(ctxprog); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c new file mode 100644 index 0000000..552fdbd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c @@ -0,0 +1,3341 @@ +/* + * Copyright 2009 Marcin Kościelnicki + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated 
documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <core/gpuobj.h> + +#define CP_FLAG_CLEAR 0 +#define CP_FLAG_SET 1 +#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) +#define CP_FLAG_SWAP_DIRECTION_LOAD 0 +#define CP_FLAG_SWAP_DIRECTION_SAVE 1 +#define CP_FLAG_UNK01 ((0 * 32) + 1) +#define CP_FLAG_UNK01_CLEAR 0 +#define CP_FLAG_UNK01_SET 1 +#define CP_FLAG_UNK03 ((0 * 32) + 3) +#define CP_FLAG_UNK03_CLEAR 0 +#define CP_FLAG_UNK03_SET 1 +#define CP_FLAG_USER_SAVE ((0 * 32) + 5) +#define CP_FLAG_USER_SAVE_NOT_PENDING 0 +#define CP_FLAG_USER_SAVE_PENDING 1 +#define CP_FLAG_USER_LOAD ((0 * 32) + 6) +#define CP_FLAG_USER_LOAD_NOT_PENDING 0 +#define CP_FLAG_USER_LOAD_PENDING 1 +#define CP_FLAG_UNK0B ((0 * 32) + 0xb) +#define CP_FLAG_UNK0B_CLEAR 0 +#define CP_FLAG_UNK0B_SET 1 +#define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe) +#define CP_FLAG_XFER_SWITCH_DISABLE 0 +#define CP_FLAG_XFER_SWITCH_ENABLE 1 +#define CP_FLAG_STATE ((0 * 32) + 0x1c) +#define CP_FLAG_STATE_STOPPED 0 +#define CP_FLAG_STATE_RUNNING 1 +#define CP_FLAG_UNK1D ((0 * 32) + 0x1d) +#define CP_FLAG_UNK1D_CLEAR 0 +#define CP_FLAG_UNK1D_SET 1 +#define CP_FLAG_UNK20 ((1 * 32) + 0) +#define CP_FLAG_UNK20_CLEAR 0 +#define CP_FLAG_UNK20_SET 1 +#define CP_FLAG_STATUS ((2 * 32) + 0) +#define CP_FLAG_STATUS_BUSY 0 +#define CP_FLAG_STATUS_IDLE 1 +#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4) +#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0 +#define CP_FLAG_AUTO_SAVE_PENDING 1 +#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) +#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 +#define CP_FLAG_AUTO_LOAD_PENDING 1 +#define CP_FLAG_NEWCTX ((2 * 32) + 10) +#define CP_FLAG_NEWCTX_BUSY 0 +#define CP_FLAG_NEWCTX_DONE 1 +#define CP_FLAG_XFER ((2 * 32) + 11) +#define CP_FLAG_XFER_IDLE 0 +#define CP_FLAG_XFER_BUSY 1 +#define CP_FLAG_ALWAYS ((2 * 32) + 13) +#define CP_FLAG_ALWAYS_FALSE 0 +#define CP_FLAG_ALWAYS_TRUE 1 +#define CP_FLAG_INTR ((2 * 32) + 15) +#define CP_FLAG_INTR_NOT_PENDING 0 +#define CP_FLAG_INTR_PENDING 1 + +#define CP_CTX 0x00100000 +#define CP_CTX_COUNT 0x000f0000 +#define CP_CTX_COUNT_SHIFT 16 +#define CP_CTX_REG 0x00003fff +#define CP_LOAD_SR 0x00200000 +#define CP_LOAD_SR_VALUE 0x000fffff +#define CP_BRA 0x00400000 +#define CP_BRA_IP 0x0001ff00 +#define CP_BRA_IP_SHIFT 8 +#define CP_BRA_IF_CLEAR 0x00000080 +#define CP_BRA_FLAG 0x0000007f +#define CP_WAIT 0x00500000 +#define CP_WAIT_SET 0x00000080 +#define CP_WAIT_FLAG 0x0000007f +#define CP_SET 0x00700000 +#define CP_SET_1 0x00000080 +#define CP_SET_FLAG 0x0000007f +#define CP_NEWCTX 0x00600004 +#define CP_NEXT_TO_SWAP 0x00600005 +#define CP_SET_CONTEXT_POINTER 0x00600006 +#define 
CP_SET_XFER_POINTER 0x00600007 +#define CP_ENABLE 0x00600009 +#define CP_END 0x0060000c +#define CP_NEXT_TO_CURRENT 0x0060000d +#define CP_DISABLE1 0x0090ffff +#define CP_DISABLE2 0x0091ffff +#define CP_XFER_1 0x008000ff +#define CP_XFER_2 0x008800ff +#define CP_SEEK_1 0x00c000ff +#define CP_SEEK_2 0x00c800ff + +#include "nv50.h" +#include "ctx.h" + +#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) +#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) + +/* + * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's + * the GPU itself that does context-switching, but it needs a special + * microcode to do it. And it's the driver's task to supply this microcode, + * further known as ctxprog, as well as the initial context values, known + * as ctxvals. + * + * Without ctxprog, you cannot switch contexts. Not even in software, since + * the majority of context [xfer strands] isn't accessible directly. You're + * stuck with a single channel, and you also suffer all the problems resulting + * from missing ctxvals, since you cannot load them. + * + * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to + * run 2d operations, but trying to utilise 3d or CUDA will just lock you up, + * since you don't have... some sort of needed setup. + * + * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since + * it's too much hassle to handle no-ctxprog as a special case. + */ + +/* + * How ctxprogs work. + * + * The ctxprog is written in its own kind of microcode, with very small and + * crappy set of available commands. You upload it to a small [512 insns] + * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to + * switch channel. or when the driver explicitely requests it. Stuff visible + * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands, + * the per-channel context save area in VRAM [known as ctxvals or grctx], + * 4 flags registers, a scratch register, two grctx pointers, plus many + * random poorly-understood details. + * + * When ctxprog runs, it's supposed to check what operations are asked of it, + * save old context if requested, optionally reset PGRAPH and switch to the + * new channel, and load the new context. Context consists of three major + * parts: subset of MMIO registers and two "xfer areas". + */ + +/* TODO: + * - document unimplemented bits compared to nvidia + * - NVAx: make a TP subroutine, use it. + * - use 0x4008fc instead of 0x1540? + */ + +enum cp_label { + cp_check_load = 1, + cp_setup_auto_load, + cp_setup_load, + cp_setup_save, + cp_swap_state, + cp_prepare_exit, + cp_exit, +}; + +static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx); +static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx); +static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx); + +/* Main function: construct the ctxprog skeleton, call the other functions. 
*/ + +static int +nv50_grctx_generate(struct nouveau_grctx *ctx) +{ + cp_set (ctx, STATE, RUNNING); + cp_set (ctx, XFER_SWITCH, ENABLE); + /* decide whether we're loading/unloading the context */ + cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); + cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); + + cp_name(ctx, cp_check_load); + cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); + cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); + cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit); + + /* setup for context load */ + cp_name(ctx, cp_setup_auto_load); + cp_out (ctx, CP_DISABLE1); + cp_out (ctx, CP_DISABLE2); + cp_out (ctx, CP_ENABLE); + cp_out (ctx, CP_NEXT_TO_SWAP); + cp_set (ctx, UNK01, SET); + cp_name(ctx, cp_setup_load); + cp_out (ctx, CP_NEWCTX); + cp_wait(ctx, NEWCTX, BUSY); + cp_set (ctx, UNK1D, CLEAR); + cp_set (ctx, SWAP_DIRECTION, LOAD); + cp_bra (ctx, UNK0B, SET, cp_prepare_exit); + cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); + + /* setup for context save */ + cp_name(ctx, cp_setup_save); + cp_set (ctx, UNK1D, SET); + cp_wait(ctx, STATUS, BUSY); + cp_wait(ctx, INTR, PENDING); + cp_bra (ctx, STATUS, BUSY, cp_setup_save); + cp_set (ctx, UNK01, SET); + cp_set (ctx, SWAP_DIRECTION, SAVE); + + /* general PGRAPH state */ + cp_name(ctx, cp_swap_state); + cp_set (ctx, UNK03, SET); + cp_pos (ctx, 0x00004/4); + cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */ + cp_pos (ctx, 0x00100/4); + nv50_graph_construct_mmio(ctx); + nv50_graph_construct_xfer1(ctx); + nv50_graph_construct_xfer2(ctx); + + cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); + + cp_set (ctx, UNK20, SET); + cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */ + cp_lsr (ctx, ctx->ctxvals_base); + cp_out (ctx, CP_SET_XFER_POINTER); + cp_lsr (ctx, 4); + cp_out (ctx, CP_SEEK_1); + cp_out (ctx, CP_XFER_1); + cp_wait(ctx, XFER, BUSY); + + /* pre-exit state updates */ + cp_name(ctx, cp_prepare_exit); + cp_set (ctx, UNK01, CLEAR); + cp_set (ctx, UNK03, CLEAR); + cp_set (ctx, UNK1D, CLEAR); + + cp_bra (ctx, USER_SAVE, PENDING, cp_exit); + cp_out (ctx, CP_NEXT_TO_CURRENT); + + cp_name(ctx, cp_exit); + cp_set (ctx, USER_SAVE, NOT_PENDING); + cp_set (ctx, USER_LOAD, NOT_PENDING); + cp_set (ctx, XFER_SWITCH, DISABLE); + cp_set (ctx, STATE, STOPPED); + cp_out (ctx, CP_END); + ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ + + return 0; +} + +void +nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem) +{ + nv50_grctx_generate(&(struct nouveau_grctx) { + .device = device, + .mode = NOUVEAU_GRCTX_VALS, + .data = mem, + }); +} + +int +nv50_grctx_init(struct nouveau_device *device, u32 *size) +{ + u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i; + struct nouveau_grctx ctx = { + .device = device, + .mode = NOUVEAU_GRCTX_PROG, + .data = ctxprog, + .ctxprog_max = 512, + }; + + if (!ctxprog) + return -ENOMEM; + nv50_grctx_generate(&ctx); + + nv_wr32(device, 0x400324, 0); + for (i = 0; i < ctx.ctxprog_len; i++) + nv_wr32(device, 0x400328, ctxprog[i]); + *size = ctx.ctxvals_pos * 4; + kfree(ctxprog); + return 0; +} + +/* + * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which + * registers to save/restore and the default values for them. 
+ */ + +static void +nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx); + +static void +nv50_graph_construct_mmio(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i, j; + int offset, base; + u32 units = nv_rd32 (ctx->device, 0x1540); + + /* 0800: DISPATCH */ + cp_ctx(ctx, 0x400808, 7); + gr_def(ctx, 0x400814, 0x00000030); + cp_ctx(ctx, 0x400834, 0x32); + if (device->chipset == 0x50) { + gr_def(ctx, 0x400834, 0xff400040); + gr_def(ctx, 0x400838, 0xfff00080); + gr_def(ctx, 0x40083c, 0xfff70090); + gr_def(ctx, 0x400840, 0xffe806a8); + } + gr_def(ctx, 0x400844, 0x00000002); + if (IS_NVA3F(device->chipset)) + gr_def(ctx, 0x400894, 0x00001000); + gr_def(ctx, 0x4008e8, 0x00000003); + gr_def(ctx, 0x4008ec, 0x00001000); + if (device->chipset == 0x50) + cp_ctx(ctx, 0x400908, 0xb); + else if (device->chipset < 0xa0) + cp_ctx(ctx, 0x400908, 0xc); + else + cp_ctx(ctx, 0x400908, 0xe); + + if (device->chipset >= 0xa0) + cp_ctx(ctx, 0x400b00, 0x1); + if (IS_NVA3F(device->chipset)) { + cp_ctx(ctx, 0x400b10, 0x1); + gr_def(ctx, 0x400b10, 0x0001629d); + cp_ctx(ctx, 0x400b20, 0x1); + gr_def(ctx, 0x400b20, 0x0001629d); + } + + nv50_graph_construct_mmio_ddata(ctx); + + /* 0C00: VFETCH */ + cp_ctx(ctx, 0x400c08, 0x2); + gr_def(ctx, 0x400c08, 0x0000fe0c); + + /* 1000 */ + if (device->chipset < 0xa0) { + cp_ctx(ctx, 0x401008, 0x4); + gr_def(ctx, 0x401014, 0x00001000); + } else if (!IS_NVA3F(device->chipset)) { + cp_ctx(ctx, 0x401008, 0x5); + gr_def(ctx, 0x401018, 0x00001000); + } else { + cp_ctx(ctx, 0x401008, 0x5); + gr_def(ctx, 0x401018, 0x00004000); + } + + /* 1400 */ + cp_ctx(ctx, 0x401400, 0x8); + cp_ctx(ctx, 0x401424, 0x3); + if (device->chipset == 0x50) + gr_def(ctx, 0x40142c, 0x0001fd87); + else + gr_def(ctx, 0x40142c, 0x00000187); + cp_ctx(ctx, 0x401540, 0x5); + gr_def(ctx, 0x401550, 0x00001018); + + /* 1800: STREAMOUT */ + cp_ctx(ctx, 0x401814, 0x1); + gr_def(ctx, 0x401814, 0x000000ff); + if (device->chipset == 0x50) { + cp_ctx(ctx, 0x40181c, 0xe); + gr_def(ctx, 0x401850, 0x00000004); + } else if (device->chipset < 0xa0) { + cp_ctx(ctx, 0x40181c, 0xf); + gr_def(ctx, 0x401854, 0x00000004); + } else { + cp_ctx(ctx, 0x40181c, 0x13); + gr_def(ctx, 0x401864, 0x00000004); + } + + /* 1C00 */ + cp_ctx(ctx, 0x401c00, 0x1); + switch (device->chipset) { + case 0x50: + gr_def(ctx, 0x401c00, 0x0001005f); + break; + case 0x84: + case 0x86: + case 0x94: + gr_def(ctx, 0x401c00, 0x044d00df); + break; + case 0x92: + case 0x96: + case 0x98: + case 0xa0: + case 0xaa: + case 0xac: + gr_def(ctx, 0x401c00, 0x042500df); + break; + case 0xa3: + case 0xa5: + case 0xa8: + case 0xaf: + gr_def(ctx, 0x401c00, 0x142500df); + break; + } + + /* 2000 */ + + /* 2400 */ + cp_ctx(ctx, 0x402400, 0x1); + if (device->chipset == 0x50) + cp_ctx(ctx, 0x402408, 0x1); + else + cp_ctx(ctx, 0x402408, 0x2); + gr_def(ctx, 0x402408, 0x00000600); + + /* 2800: CSCHED */ + cp_ctx(ctx, 0x402800, 0x1); + if (device->chipset == 0x50) + gr_def(ctx, 0x402800, 0x00000006); + + /* 2C00: ZCULL */ + cp_ctx(ctx, 0x402c08, 0x6); + if (device->chipset != 0x50) + gr_def(ctx, 0x402c14, 0x01000000); + gr_def(ctx, 0x402c18, 0x000000ff); + if (device->chipset == 0x50) + cp_ctx(ctx, 0x402ca0, 0x1); + else + cp_ctx(ctx, 0x402ca0, 0x2); + if (device->chipset < 0xa0) + gr_def(ctx, 0x402ca0, 0x00000400); + else if (!IS_NVA3F(device->chipset)) + gr_def(ctx, 0x402ca0, 0x00000800); + else + gr_def(ctx, 0x402ca0, 0x00000400); + cp_ctx(ctx, 0x402cac, 0x4); + + /* 3000: ENG2D */ + cp_ctx(ctx, 0x403004, 0x1); + gr_def(ctx, 0x403004, 
0x00000001); + + /* 3400 */ + if (device->chipset >= 0xa0) { + cp_ctx(ctx, 0x403404, 0x1); + gr_def(ctx, 0x403404, 0x00000001); + } + + /* 5000: CCACHE */ + cp_ctx(ctx, 0x405000, 0x1); + switch (device->chipset) { + case 0x50: + gr_def(ctx, 0x405000, 0x00300080); + break; + case 0x84: + case 0xa0: + case 0xa3: + case 0xa5: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: + gr_def(ctx, 0x405000, 0x000e0080); + break; + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + gr_def(ctx, 0x405000, 0x00000080); + break; + } + cp_ctx(ctx, 0x405014, 0x1); + gr_def(ctx, 0x405014, 0x00000004); + cp_ctx(ctx, 0x40501c, 0x1); + cp_ctx(ctx, 0x405024, 0x1); + cp_ctx(ctx, 0x40502c, 0x1); + + /* 6000? */ + if (device->chipset == 0x50) + cp_ctx(ctx, 0x4063e0, 0x1); + + /* 6800: M2MF */ + if (device->chipset < 0x90) { + cp_ctx(ctx, 0x406814, 0x2b); + gr_def(ctx, 0x406818, 0x00000f80); + gr_def(ctx, 0x406860, 0x007f0080); + gr_def(ctx, 0x40689c, 0x007f0080); + } else { + cp_ctx(ctx, 0x406814, 0x4); + if (device->chipset == 0x98) + gr_def(ctx, 0x406818, 0x00000f80); + else + gr_def(ctx, 0x406818, 0x00001f80); + if (IS_NVA3F(device->chipset)) + gr_def(ctx, 0x40681c, 0x00000030); + cp_ctx(ctx, 0x406830, 0x3); + } + + /* 7000: per-ROP group state */ + for (i = 0; i < 8; i++) { + if (units & (1<<(i+16))) { + cp_ctx(ctx, 0x407000 + (i<<8), 3); + if (device->chipset == 0x50) + gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); + else if (device->chipset != 0xa5) + gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); + else + gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); + gr_def(ctx, 0x407004 + (i<<8), 0x89058001); + + if (device->chipset == 0x50) { + cp_ctx(ctx, 0x407010 + (i<<8), 1); + } else if (device->chipset < 0xa0) { + cp_ctx(ctx, 0x407010 + (i<<8), 2); + gr_def(ctx, 0x407010 + (i<<8), 0x00001000); + gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); + } else { + cp_ctx(ctx, 0x407010 + (i<<8), 3); + gr_def(ctx, 0x407010 + (i<<8), 0x00001000); + if (device->chipset != 0xa5) + gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); + else + gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); + } + + cp_ctx(ctx, 0x407080 + (i<<8), 4); + if (device->chipset != 0xa5) + gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); + else + gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); + if (device->chipset == 0x50) + gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); + else + gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); + gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); + + if (device->chipset < 0xa0) + cp_ctx(ctx, 0x407094 + (i<<8), 1); + else if (!IS_NVA3F(device->chipset)) + cp_ctx(ctx, 0x407094 + (i<<8), 3); + else { + cp_ctx(ctx, 0x407094 + (i<<8), 4); + gr_def(ctx, 0x4070a0 + (i<<8), 1); + } + } + } + + cp_ctx(ctx, 0x407c00, 0x3); + if (device->chipset < 0x90) + gr_def(ctx, 0x407c00, 0x00010040); + else if (device->chipset < 0xa0) + gr_def(ctx, 0x407c00, 0x00390040); + else + gr_def(ctx, 0x407c00, 0x003d0040); + gr_def(ctx, 0x407c08, 0x00000022); + if (device->chipset >= 0xa0) { + cp_ctx(ctx, 0x407c10, 0x3); + cp_ctx(ctx, 0x407c20, 0x1); + cp_ctx(ctx, 0x407c2c, 0x1); + } + + if (device->chipset < 0xa0) { + cp_ctx(ctx, 0x407d00, 0x9); + } else { + cp_ctx(ctx, 0x407d00, 0x15); + } + if (device->chipset == 0x98) + gr_def(ctx, 0x407d08, 0x00380040); + else { + if (device->chipset < 0x90) + gr_def(ctx, 0x407d08, 0x00010040); + else if (device->chipset < 0xa0) + gr_def(ctx, 0x407d08, 0x00390040); + else + gr_def(ctx, 0x407d08, 0x003d0040); + gr_def(ctx, 0x407d0c, 0x00000022); + } + + /* 8000+: per-TP state */ + for (i = 0; i < 10; i++) { + if (units & (1<<i)) { + if 
(device->chipset < 0xa0) + base = 0x408000 + (i<<12); + else + base = 0x408000 + (i<<11); + if (device->chipset < 0xa0) + offset = base + 0xc00; + else + offset = base + 0x80; + cp_ctx(ctx, offset + 0x00, 1); + gr_def(ctx, offset + 0x00, 0x0000ff0a); + cp_ctx(ctx, offset + 0x08, 1); + + /* per-MP state */ + for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) { + if (!(units & (1 << (j+24)))) continue; + if (device->chipset < 0xa0) + offset = base + 0x200 + (j<<7); + else + offset = base + 0x100 + (j<<7); + cp_ctx(ctx, offset, 0x20); + gr_def(ctx, offset + 0x00, 0x01800000); + gr_def(ctx, offset + 0x04, 0x00160000); + gr_def(ctx, offset + 0x08, 0x01800000); + gr_def(ctx, offset + 0x18, 0x0003ffff); + switch (device->chipset) { + case 0x50: + gr_def(ctx, offset + 0x1c, 0x00080000); + break; + case 0x84: + gr_def(ctx, offset + 0x1c, 0x00880000); + break; + case 0x86: + gr_def(ctx, offset + 0x1c, 0x018c0000); + break; + case 0x92: + case 0x96: + case 0x98: + gr_def(ctx, offset + 0x1c, 0x118c0000); + break; + case 0x94: + gr_def(ctx, offset + 0x1c, 0x10880000); + break; + case 0xa0: + case 0xa5: + gr_def(ctx, offset + 0x1c, 0x310c0000); + break; + case 0xa3: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: + gr_def(ctx, offset + 0x1c, 0x300c0000); + break; + } + gr_def(ctx, offset + 0x40, 0x00010401); + if (device->chipset == 0x50) + gr_def(ctx, offset + 0x48, 0x00000040); + else + gr_def(ctx, offset + 0x48, 0x00000078); + gr_def(ctx, offset + 0x50, 0x000000bf); + gr_def(ctx, offset + 0x58, 0x00001210); + if (device->chipset == 0x50) + gr_def(ctx, offset + 0x5c, 0x00000080); + else + gr_def(ctx, offset + 0x5c, 0x08000080); + if (device->chipset >= 0xa0) + gr_def(ctx, offset + 0x68, 0x0000003e); + } + + if (device->chipset < 0xa0) + cp_ctx(ctx, base + 0x300, 0x4); + else + cp_ctx(ctx, base + 0x300, 0x5); + if (device->chipset == 0x50) + gr_def(ctx, base + 0x304, 0x00007070); + else if (device->chipset < 0xa0) + gr_def(ctx, base + 0x304, 0x00027070); + else if (!IS_NVA3F(device->chipset)) + gr_def(ctx, base + 0x304, 0x01127070); + else + gr_def(ctx, base + 0x304, 0x05127070); + + if (device->chipset < 0xa0) + cp_ctx(ctx, base + 0x318, 1); + else + cp_ctx(ctx, base + 0x320, 1); + if (device->chipset == 0x50) + gr_def(ctx, base + 0x318, 0x0003ffff); + else if (device->chipset < 0xa0) + gr_def(ctx, base + 0x318, 0x03ffffff); + else + gr_def(ctx, base + 0x320, 0x07ffffff); + + if (device->chipset < 0xa0) + cp_ctx(ctx, base + 0x324, 5); + else + cp_ctx(ctx, base + 0x328, 4); + + if (device->chipset < 0xa0) { + cp_ctx(ctx, base + 0x340, 9); + offset = base + 0x340; + } else if (!IS_NVA3F(device->chipset)) { + cp_ctx(ctx, base + 0x33c, 0xb); + offset = base + 0x344; + } else { + cp_ctx(ctx, base + 0x33c, 0xd); + offset = base + 0x344; + } + gr_def(ctx, offset + 0x0, 0x00120407); + gr_def(ctx, offset + 0x4, 0x05091507); + if (device->chipset == 0x84) + gr_def(ctx, offset + 0x8, 0x05100202); + else + gr_def(ctx, offset + 0x8, 0x05010202); + gr_def(ctx, offset + 0xc, 0x00030201); + if (device->chipset == 0xa3) + cp_ctx(ctx, base + 0x36c, 1); + + cp_ctx(ctx, base + 0x400, 2); + gr_def(ctx, base + 0x404, 0x00000040); + cp_ctx(ctx, base + 0x40c, 2); + gr_def(ctx, base + 0x40c, 0x0d0c0b0a); + gr_def(ctx, base + 0x410, 0x00141210); + + if (device->chipset < 0xa0) + offset = base + 0x800; + else + offset = base + 0x500; + cp_ctx(ctx, offset, 6); + gr_def(ctx, offset + 0x0, 0x000001f0); + gr_def(ctx, offset + 0x4, 0x00000001); + gr_def(ctx, offset + 0x8, 0x00000003); + if (device->chipset == 0x50 || 
IS_NVAAF(device->chipset)) + gr_def(ctx, offset + 0xc, 0x00008000); + gr_def(ctx, offset + 0x14, 0x00039e00); + cp_ctx(ctx, offset + 0x1c, 2); + if (device->chipset == 0x50) + gr_def(ctx, offset + 0x1c, 0x00000040); + else + gr_def(ctx, offset + 0x1c, 0x00000100); + gr_def(ctx, offset + 0x20, 0x00003800); + + if (device->chipset >= 0xa0) { + cp_ctx(ctx, base + 0x54c, 2); + if (!IS_NVA3F(device->chipset)) + gr_def(ctx, base + 0x54c, 0x003fe006); + else + gr_def(ctx, base + 0x54c, 0x003fe007); + gr_def(ctx, base + 0x550, 0x003fe000); + } + + if (device->chipset < 0xa0) + offset = base + 0xa00; + else + offset = base + 0x680; + cp_ctx(ctx, offset, 1); + gr_def(ctx, offset, 0x00404040); + + if (device->chipset < 0xa0) + offset = base + 0xe00; + else + offset = base + 0x700; + cp_ctx(ctx, offset, 2); + if (device->chipset < 0xa0) + gr_def(ctx, offset, 0x0077f005); + else if (device->chipset == 0xa5) + gr_def(ctx, offset, 0x6cf7f007); + else if (device->chipset == 0xa8) + gr_def(ctx, offset, 0x6cfff007); + else if (device->chipset == 0xac) + gr_def(ctx, offset, 0x0cfff007); + else + gr_def(ctx, offset, 0x0cf7f007); + if (device->chipset == 0x50) + gr_def(ctx, offset + 0x4, 0x00007fff); + else if (device->chipset < 0xa0) + gr_def(ctx, offset + 0x4, 0x003f7fff); + else + gr_def(ctx, offset + 0x4, 0x02bf7fff); + cp_ctx(ctx, offset + 0x2c, 1); + if (device->chipset == 0x50) { + cp_ctx(ctx, offset + 0x50, 9); + gr_def(ctx, offset + 0x54, 0x000003ff); + gr_def(ctx, offset + 0x58, 0x00000003); + gr_def(ctx, offset + 0x5c, 0x00000003); + gr_def(ctx, offset + 0x60, 0x000001ff); + gr_def(ctx, offset + 0x64, 0x0000001f); + gr_def(ctx, offset + 0x68, 0x0000000f); + gr_def(ctx, offset + 0x6c, 0x0000000f); + } else if (device->chipset < 0xa0) { + cp_ctx(ctx, offset + 0x50, 1); + cp_ctx(ctx, offset + 0x70, 1); + } else { + cp_ctx(ctx, offset + 0x50, 1); + cp_ctx(ctx, offset + 0x60, 5); + } + } + } +} + +static void +dd_emit(struct nouveau_grctx *ctx, int num, u32 val) { + int i; + if (val && ctx->mode == NOUVEAU_GRCTX_VALS) + for (i = 0; i < num; i++) + nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val); + ctx->ctxvals_pos += num; +} + +static void +nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int base, num; + base = ctx->ctxvals_pos; + + /* tesla state */ + dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK135C */ + + /* SRC_TIC state */ + dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */ + dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */ + dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */ + if (device->chipset >= 0x94) + dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */ + dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */ + dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */ + + /* turing state */ + dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */ + dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ + dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ + dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ + dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */ + dd_emit(ctx, 1, 1); /* 00000001 LANES32 */ + dd_emit(ctx, 1, 0); /* 000000ff UNK370 */ + dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */ + dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */ + dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */ + dd_emit(ctx, 1, 
0x3fffff); /* 003fffff TIC_LIMIT */ + dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */ + dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */ + dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */ + dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */ + dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */ + dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */ + if (IS_NVA3F(device->chipset)) + dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */ + dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */ + dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */ + dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */ + dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */ + dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */ + dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */ + dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */ + + /* compat 2d state */ + if (device->chipset == 0x50) { + dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */ + + dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */ + + dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */ + dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */ + + dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */ + dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */ + dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */ + dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */ + dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */ + dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */ + + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */ + dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */ + + dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */ + dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */ + dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */ + } + + /* m2mf state */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */ + dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */ + dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */ + dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */ + dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */ + dd_emit(ctx, 1, 1); /* ffffffff 
m2mf TILING_DEPTH_IN */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */ + dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */ + dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */ + + /* more compat 2d state */ + if (device->chipset == 0x50) { + dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */ + + dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */ + + dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */ + dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */ + dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */ + dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */ + dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */ + dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */ + } + + /* tesla state */ + dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */ + dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */ + dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */ + dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */ + dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */ + if (IS_NVA3F(device->chipset)) { + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ + } else { + dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ + } + dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ + if (device->chipset != 0x50) + dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ + dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */ + dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */ + if (device->chipset == 0x50) { + dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */ + dd_emit(ctx, 1, 0); /* 00000001 */ + } else { + dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */ + dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */ + dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */ + dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + dd_emit(ctx, 1, 8); /* 0000000f SMENATIC_CLIP.CLIP_HIGH */ + dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */ + dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */ + } + dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */ + dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */ + dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */ + dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */ + dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */ + dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */ + dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */ + dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */ + dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */ + dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */ + dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ + dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */ + dd_emit(ctx, 7, 0); /* 
000000ff RT_FORMAT */ + if (device->chipset != 0x50) + dd_emit(ctx, 3, 0); /* 1, 1, 1 */ + else + dd_emit(ctx, 2, 0); /* 1, 1 */ + dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */ + dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/ + dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + if (IS_NVA3F(device->chipset)) { + dd_emit(ctx, 1, 3); /* 00000003 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */ + } + if (device->chipset != 0x50) + dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */ + dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */ + dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */ + if (device->chipset != 0x50) + dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */ + dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */ + dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */ + dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */ + dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */ + dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ + dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ + dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + if (device->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */ + dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */ + dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/ + if (device->chipset != 0x50) + dd_emit(ctx, 8, 0); /* 00000001 */ + if (device->chipset >= 0xa0) { + dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */ + dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */ + dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */ + dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */ + } + dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */ + dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */ + if (IS_NVA3F(device->chipset)) + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */ + if (device->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */ + dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ + if (device->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* 00000003 */ + dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */ + dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */ + dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */ + dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */ + dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */ + if (device->chipset != 0x50) { + dd_emit(ctx, 1, 0xe00); /* 7fff */ + dd_emit(ctx, 1, 0x1000); /* 7fff */ + dd_emit(ctx, 1, 0x1e00); /* 7fff */ + } + dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */ + dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */ + dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */ + dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? 
*/ + dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */ + dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */ + dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */ + if (IS_NVA3F(device->chipset)) + dd_emit(ctx, 1, 0x200); + dd_emit(ctx, 1, 0); /* 00000001 */ + if (device->chipset < 0xa0) { + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0x70); /* 000000ff */ + dd_emit(ctx, 1, 0x80); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0x70); /* 000000ff */ + dd_emit(ctx, 1, 0x80); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + } else { + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0xf0); /* 000000ff */ + dd_emit(ctx, 1, 0xff); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0xf0); /* 000000ff */ + dd_emit(ctx, 1, 0xff); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */ + } + + /* eng2d state */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */ + dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */ + dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */ + dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */ + dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */ + + num = ctx->ctxvals_pos - base; + ctx->ctxvals_pos = base; + if (IS_NVA3F(device->chipset)) + cp_ctx(ctx, 0x404800, num); + else + cp_ctx(ctx, 0x405400, num); +} + +/* + * xfer areas. These are a pain. + * + * There are 2 xfer areas: the first one is big and contains all sorts of + * stuff, the second is small and contains some per-TP context. + * + * Each area is split into 8 "strands". The areas, when saved to grctx, + * are made of 8-word blocks. Each block contains a single word from + * each strand. The strands are independent of each other, their + * addresses are unrelated to each other, and data in them is closely + * packed together. The strand layout varies a bit between cards: here + * and there, a single word is thrown out in the middle and the whole + * strand is offset by a bit from corresponding one on another chipset. + * For this reason, addresses of stuff in strands are almost useless. + * Knowing sequence of stuff and size of gaps between them is much more + * useful, and that's how we build the strands in our generator. 
+ * + * NVA0 takes this mess to a whole new level by cutting the old strands + * into a few dozen pieces [known as genes], rearranging them randomly, + * and putting them back together to make new strands. Hopefully these + * genes correspond more or less directly to the same PGRAPH subunits + * as in 400040 register. + * + * The most common value in default context is 0, and when the genes + * are separated by 0's, gene bounduaries are quite speculative... + * some of them can be clearly deduced, others can be guessed, and yet + * others won't be resolved without figuring out the real meaning of + * given ctxval. For the same reason, ending point of each strand + * is unknown. Except for strand 0, which is the longest strand and + * its end corresponds to end of the whole xfer. + * + * An unsolved mystery is the seek instruction: it takes an argument + * in bits 8-18, and that argument is clearly the place in strands to + * seek to... but the offsets don't seem to correspond to offsets as + * seen in grctx. Perhaps there's another, real, not randomly-changing + * addressing in strands, and the xfer insn just happens to skip over + * the unused bits? NV10-NV30 PIPE comes to mind... + * + * As far as I know, there's no way to access the xfer areas directly + * without the help of ctxprog. + */ + +static void +xf_emit(struct nouveau_grctx *ctx, int num, u32 val) { + int i; + if (val && ctx->mode == NOUVEAU_GRCTX_VALS) + for (i = 0; i < num; i++) + nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val); + ctx->ctxvals_pos += num << 3; +} + +/* Gene declarations... */ + +static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx); +static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx); + +static void +nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + int offset; + int size = 0; + u32 units = nv_rd32 (ctx->device, 0x1540); + + offset = (ctx->ctxvals_pos+0x3f)&~0x3f; + ctx->ctxvals_base = offset; + + if (device->chipset < 0xa0) { + /* Strand 0 */ + ctx->ctxvals_pos = offset; + nv50_graph_construct_gene_dispatch(ctx); + nv50_graph_construct_gene_m2mf(ctx); + nv50_graph_construct_gene_unk24xx(ctx); + nv50_graph_construct_gene_clipid(ctx); + nv50_graph_construct_gene_zcull(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = 
(ctx->ctxvals_pos-offset)/8; + + /* Strand 1 */ + ctx->ctxvals_pos = offset + 0x1; + nv50_graph_construct_gene_vfetch(ctx); + nv50_graph_construct_gene_eng2d(ctx); + nv50_graph_construct_gene_csched(ctx); + nv50_graph_construct_gene_ropm1(ctx); + nv50_graph_construct_gene_ropm2(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 2 */ + ctx->ctxvals_pos = offset + 0x2; + nv50_graph_construct_gene_ccache(ctx); + nv50_graph_construct_gene_unk1cxx(ctx); + nv50_graph_construct_gene_strmout(ctx); + nv50_graph_construct_gene_unk14xx(ctx); + nv50_graph_construct_gene_unk10xx(ctx); + nv50_graph_construct_gene_unk34xx(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 3: per-ROP group state */ + ctx->ctxvals_pos = offset + 3; + for (i = 0; i < 6; i++) + if (units & (1 << (i + 16))) + nv50_graph_construct_gene_ropc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strands 4-7: per-TP state */ + for (i = 0; i < 4; i++) { + ctx->ctxvals_pos = offset + 4 + i; + if (units & (1 << (2 * i))) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << (2 * i + 1))) + nv50_graph_construct_xfer_tp(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + } else { + /* Strand 0 */ + ctx->ctxvals_pos = offset; + nv50_graph_construct_gene_dispatch(ctx); + nv50_graph_construct_gene_m2mf(ctx); + nv50_graph_construct_gene_unk34xx(ctx); + nv50_graph_construct_gene_csched(ctx); + nv50_graph_construct_gene_unk1cxx(ctx); + nv50_graph_construct_gene_strmout(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 1 */ + ctx->ctxvals_pos = offset + 1; + nv50_graph_construct_gene_unk10xx(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 2 */ + ctx->ctxvals_pos = offset + 2; + if (device->chipset == 0xa0) + nv50_graph_construct_gene_unk14xx(ctx); + nv50_graph_construct_gene_unk24xx(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 3 */ + ctx->ctxvals_pos = offset + 3; + nv50_graph_construct_gene_vfetch(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 4 */ + ctx->ctxvals_pos = offset + 4; + nv50_graph_construct_gene_ccache(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 5 */ + ctx->ctxvals_pos = offset + 5; + nv50_graph_construct_gene_ropm2(ctx); + nv50_graph_construct_gene_ropm1(ctx); + /* per-ROP context */ + for (i = 0; i < 8; i++) + if (units & (1<<(i+16))) + nv50_graph_construct_gene_ropc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 6 */ + ctx->ctxvals_pos = offset + 6; + nv50_graph_construct_gene_zcull(ctx); + nv50_graph_construct_gene_clipid(ctx); + nv50_graph_construct_gene_eng2d(ctx); + if (units & (1 << 0)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 1)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 2)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 3)) + nv50_graph_construct_xfer_tp(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 7 */ + ctx->ctxvals_pos = offset + 7; + if (device->chipset == 0xa0) { + if (units & (1 << 4)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 5)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 6)) + 
nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 7)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 8)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 9)) + nv50_graph_construct_xfer_tp(ctx); + } else { + nv50_graph_construct_gene_unk14xx(ctx); + } + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + + ctx->ctxvals_pos = offset + size * 8; + ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; + cp_lsr (ctx, offset); + cp_out (ctx, CP_SET_XFER_POINTER); + cp_lsr (ctx, size); + cp_out (ctx, CP_SEEK_1); + cp_out (ctx, CP_XFER_1); + cp_wait(ctx, XFER, BUSY); +} + +/* + * non-trivial demagiced parts of ctx init go here + */ + +static void +nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx) +{ + /* start of strand 0 */ + struct nouveau_device *device = ctx->device; + /* SEEK */ + if (device->chipset == 0x50) + xf_emit(ctx, 5, 0); + else if (!IS_NVA3F(device->chipset)) + xf_emit(ctx, 6, 0); + else + xf_emit(ctx, 4, 0); + /* SEEK */ + /* the PGRAPH's internal FIFO */ + if (device->chipset == 0x50) + xf_emit(ctx, 8*3, 0); + else + xf_emit(ctx, 0x100*3, 0); + /* and another bonus slot?!? */ + xf_emit(ctx, 3, 0); + /* and YET ANOTHER bonus slot? */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 3, 0); + /* SEEK */ + /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + if (device->chipset < 0x90) + xf_emit(ctx, 4, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 6*2, 0); + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 6*2, 0); + xf_emit(ctx, 2, 0); + /* SEEK */ + if (device->chipset == 0x50) + xf_emit(ctx, 0x1c, 0); + else if (device->chipset < 0xa0) + xf_emit(ctx, 0x1e, 0); + else + xf_emit(ctx, 0x22, 0); + /* SEEK */ + xf_emit(ctx, 0x15, 0); +} + +static void +nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) +{ + /* Strand 0, right after dispatch */ + struct nouveau_device *device = ctx->device; + int smallm2mf = 0; + if (device->chipset < 0x92 || device->chipset == 0x98) + smallm2mf = 1; + /* SEEK */ + xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ + xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ + xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ + xf_emit (ctx, 1, 0); /* OFFSET_IN */ + xf_emit (ctx, 1, 0); /* OFFSET_OUT */ + xf_emit (ctx, 1, 0); /* PITCH_IN */ + xf_emit (ctx, 1, 0); /* PITCH_OUT */ + xf_emit (ctx, 1, 0); /* LINE_LENGTH */ + xf_emit (ctx, 1, 0); /* LINE_COUNT */ + xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */ + xf_emit (ctx, 1, 1); /* LINEAR_IN */ + xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */ + xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */ + xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */ + xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */ + xf_emit (ctx, 1, 1); /* LINEAR_OUT */ + xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */ + xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */ + xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */ + xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */ + xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */ + xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */ + /* 
SEEK */ + if (smallm2mf) + xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */ + else + xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */ + xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */ + /* SEEK */ + if (smallm2mf) + xf_emit(ctx, 0x400, 0); /* ffffffff */ + else + xf_emit(ctx, 0x800, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */ + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */ + xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */ +} + +static void +nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + xf_emit(ctx, 2, 0); /* RO */ + xf_emit(ctx, 0x800, 0); /* ffffffff */ + switch (device->chipset) { + case 0x50: + case 0x92: + case 0xa0: + xf_emit(ctx, 0x2b, 0); + break; + case 0x84: + xf_emit(ctx, 0x29, 0); + break; + case 0x94: + case 0x96: + case 0xa3: + xf_emit(ctx, 0x27, 0); + break; + case 0x86: + case 0x98: + case 0xa5: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: + xf_emit(ctx, 0x25, 0); + break; + } + /* CB bindings, 0x80 of them. first word is address >> 8, second is + * size >> 4 | valid << 24 */ + xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */ + xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */ + xf_emit(ctx, 1, 0); /* 0 */ + xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */ + xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */ + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0x100, 0); /* ffffffff */ + xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */ + xf_emit(ctx, 8, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 3 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */ + xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */ + xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */ + xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */ +} + +static void +nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + /* end of area 2 on pre-NVA0, area 1 on NVAx */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + if (device->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); + else + xf_emit(ctx, 1, 0x7ff); /* 000007ff */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 1, 0); /* 
ffffffff tesla UNK1A30 */ + for (i = 0; i < 8; i++) { + switch (device->chipset) { + case 0x50: + case 0x86: + case 0x98: + case 0xaa: + case 0xac: + xf_emit(ctx, 0xa0, 0); /* ffffffff */ + break; + case 0x84: + case 0x92: + case 0x94: + case 0x96: + xf_emit(ctx, 0x120, 0); + break; + case 0xa5: + case 0xa8: + xf_emit(ctx, 0x100, 0); /* ffffffff */ + break; + case 0xa0: + case 0xa3: + case 0xaf: + xf_emit(ctx, 0x400, 0); /* ffffffff */ + break; + } + xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */ + xf_emit(ctx, 4, 0); /* ffffffff */ + } + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ +} + +static void +nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + /* end of area 2 on pre-NVA0, area 1 on NVAx */ + xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ + xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */ + xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ + xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 1); + xf_emit(ctx, 3, 0); + } +} + +static void +nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ + if (device->chipset != 0x50) { + xf_emit(ctx, 5, 0); /* ffffffff */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 2, 4); /* 7f, ff */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 3ff */ + xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ + xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); 
/* 00000001 SHADE_MODEL */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 0000000f */ + if (device->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */ + xf_emit(ctx, 3, 0); /* f, 0, 0 */ + xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */ + xf_emit(ctx, 3, 0); /* f, 0, 0 */ + xf_emit(ctx, 3, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + if (device->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } + xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */ + xf_emit(ctx, 1, 0); /* f */ + xf_emit(ctx, 1, 0); /* 0? 
*/ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 003fffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* 0000000f */ +} + +static void +nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 1, 0); /* 0000ffff */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ + /* SEEK */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ + xf_emit(ctx, 1, 3); /* 00000003 
FP_CTRL_UNK196C */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */ +} + +static void +nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx) +{ + /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */ + /* SEEK */ + xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */ + xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */ + xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */ + xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */ + xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */ +} + +static void +nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */ + /* SEEK */ + xf_emit(ctx, 0x33, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + /* SEEK */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ + + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ + } else { + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 1); /* 00000001 */ + /* SEEK */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 2, 4); /* 000000ff */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 1); /* 00000001 */ + for (i = 0; i < 10; i++) { + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* ffffffff */ + xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... 
*/ + xf_emit(ctx, 0x10, 0); /* ffffffff */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */ + xf_emit(ctx, 1, 1); /* 00000001 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ + xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 000003ff */ +} + +static void +nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int acnt = 0x10, rep, i; + /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */ + if (IS_NVA3F(device->chipset)) + acnt = 0x20; + /* SEEK */ + if (device->chipset >= 0xa0) { + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */ + xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */ + } + xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */ + xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */ + xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATR_MASK_UNK0DD0 */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */ + xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */ + xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0xb, 0); /* RO */ + else if (device->chipset >= 0xa0) + xf_emit(ctx, 0x9, 0); /* RO */ + else + xf_emit(ctx, 0x8, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 7f/ff */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */ + xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (device->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */ + if (device->chipset == 0xa8) + xf_emit(ctx, 1, 0x1e00); /* 7fff */ + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO or close */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + if (device->chipset > 0x50 && device->chipset < 0xa0) + xf_emit(ctx, 2, 0); /* ffffffff */ + else + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */ + /* SEEK */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 0x10, 0); /* 0? */ + xf_emit(ctx, 2, 0); /* weird... */ + xf_emit(ctx, 2, 0); /* RO */ + } else { + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 1, 0); /* weird... */ + xf_emit(ctx, 2, 0); /* RO */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */ + xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */ + xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? 
*/ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ + xf_emit(ctx, 1, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ + xf_emit(ctx, 1, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* RO */ + xf_emit(ctx, 2, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */ + xf_emit(ctx, 1, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, acnt, 0); /* f */ + xf_emit(ctx, 3, 0); /* f/1f */ + } + /* SEEK */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 2, 0); /* RO */ + else + xf_emit(ctx, 5, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */ + /* SEEK */ + if (device->chipset < 0xa0) { + xf_emit(ctx, 0x41, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0x11, 0); /* RO */ + } else if (!IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x50, 0); /* RO */ + else + xf_emit(ctx, 0x58, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, 1, 1); /* 1 UNK0DEC */ + /* SEEK */ + xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */ + xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */ + /* SEEK */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x1d, 0); /* RO */ + else + xf_emit(ctx, 0x16, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + /* SEEK */ + if (device->chipset < 0xa0) + xf_emit(ctx, 8, 0); /* RO */ + else if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0xc, 0); /* RO */ + else + xf_emit(ctx, 7, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xa, 0); /* RO */ + if (device->chipset == 0xa0) + rep = 0xc; + else + rep = 4; + for (i = 0; i < rep; i++) { + /* SEEK */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x20, 0); /* ffffffff */ + xf_emit(ctx, 0x200, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */ + xf_emit(ctx, 4, 0); /* ffffffff */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 113/111 */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 7, 0); /* weird... */ + else + xf_emit(ctx, 5, 0); /* weird... 
*/ +} + +static void +nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */ + xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */ + xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */ + if (device->chipset < 0xa0) { + /* this is useless on everything but the original NV50, + * guess they forgot to nuke it. Or just didn't bother. */ + xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */ + xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */ + xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */ + } + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ + xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */ + xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */ + xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */ + xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ + xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */ + xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */ + xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */ + xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */ + xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */ + xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */ + xf_emit(ctx, 1, 0); /* 00000001 UNK888 */ + xf_emit(ctx, 1, 4); /* 0000003f UNK884 */ + xf_emit(ctx, 1, 0); /* 00000007 UNK880 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */ + xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */ + xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK260 */ + xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */ + /* SEEK */ + xf_emit(ctx, 0x10, 0); + /* SEEK */ + xf_emit(ctx, 0x27, 0); +} + +static void +nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */ + /* SEEK */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? 
*/ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* 000003ff */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */ + xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */ + xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */ + xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */ + xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */ + xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */ + xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ + xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ + xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ + xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ + xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */ + switch (device->chipset) { + case 0x50: + case 0x92: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x80, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */ + break; + case 0x84: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x60, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ + break; + case 0x94: + case 0x96: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x40, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */ + break; + case 0x86: + case 0x98: + xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ + xf_emit(ctx, 0x10, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ + break; + case 0xa0: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0xf0, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */ + break; + case 0xa3: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x60, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ + break; + case 0xa5: + case 0xaf: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... 
*/ + xf_emit(ctx, 0x30, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */ + break; + case 0xaa: + xf_emit(ctx, 0x12, 0); + break; + case 0xa8: + case 0xac: + xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ + xf_emit(ctx, 0x10, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ + break; + } + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000000 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0000001f */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 000000ff */ +} + +static void +nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ + xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */ + xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */ + else if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */ + xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ + xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */ + if (device->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* 3ff */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */ + } + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 
0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + if (device->chipset < 0xa0) + xf_emit(ctx, 0x1c, 0); /* RO */ + else if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x9, 0); + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + if (device->chipset != 0x50) { + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + xf_emit(ctx, 1, 0); /* 3ff */ + } + /* XXX: the following block could belong either to unk1cxx, or + * to STRMOUT. Rather hard to tell. */ + if (device->chipset < 0xa0) + xf_emit(ctx, 0x25, 0); + else + xf_emit(ctx, 0x3b, 0); +} + +static void +nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ + xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ + if (device->chipset >= 0xa0) { + xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ + xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ + } + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (device->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ + xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */ + xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */ + xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ + if (device->chipset >= 0xa0) { + xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ + xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ + } + xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */ + xf_emit(ctx, 2, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000000? 
*/ + xf_emit(ctx, 2, 0); /* ffffffff */ +} + +static void +nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ +} + +static void +nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + /* SEEK */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 2, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 7 */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ +} + +static void +nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int magic2; + if (device->chipset == 0x50) { + magic2 = 0x00003e60; + } else if (!IS_NVA3F(device->chipset)) { + magic2 = 0x001ffe67; + } else { + magic2 = 0x00087e67; + } + xf_emit(ctx, 1, 0); /* f/7 MUTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset)) + xf_emit(ctx, 1, 0x15); /* 000000ff */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) { + xf_emit(ctx, 3, 0); /* ff, ffffffff, 
ffffffff */ + xf_emit(ctx, 1, 4); /* 7 */ + xf_emit(ctx, 1, 0x400); /* fffffff */ + xf_emit(ctx, 1, 0x300); /* ffff */ + xf_emit(ctx, 1, 0x1001); /* 1fff */ + if (device->chipset != 0xa0) { + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */ + else + xf_emit(ctx, 1, 0x15); /* ff */ + } + } + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 
STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + if (device->chipset >= 0xa0) { + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x1001); + xf_emit(ctx, 0xb, 0); + } else { + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + } + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x11); /* 3f/7f */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + if (device->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ + xf_emit(ctx, 1, 0); /* 000000ff */ + } + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } else if (device->chipset >= 0xa0) { + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 2, 0); /* 00000001 */ + } else { + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + } + xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ + xf_emit(ctx, 4, 0); /* ffffffff 
BLEND_COLOR A R G B */ + xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ + } + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */ + xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */ + xf_emit(ctx, 1, 0); /* 000000ff ROP */ + xf_emit(ctx, 1, 0); /* ffffffff BETA1 */ + xf_emit(ctx, 1, 0); /* ffffffff BETA4 */ + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */ +} + +static void +nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int magic3; + switch (device->chipset) { + case 0x50: + magic3 = 0x1000; + break; + case 0x86: + case 0x98: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: + magic3 = 0x1e00; + break; + default: + magic3 = 0; + } + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x1f, 0); /* ffffffff */ + else if (device->chipset >= 0xa0) + xf_emit(ctx, 0x0f, 0); /* ffffffff */ + else + xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */ + xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 0x03020100); /* ffffffff */ + else + xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 
000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + if (magic3) + xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */ + xf_emit(ctx, 1, 0); /* 0000001f */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + if (magic3) + xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + if (device->chipset == 0x94 || device->chipset == 0x96) + xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ + else if (device->chipset < 0xa0) + xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ + else if (!IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x210, 0); /* ffffffff */ + else + xf_emit(ctx, 0x410, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ +} + +static void +nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int magic1, magic2; + if (device->chipset == 0x50) { + magic1 = 0x3ff; + magic2 = 0x00003e60; + } else if (!IS_NVA3F(device->chipset)) { + magic1 = 0x7ff; + magic2 = 0x001ffe67; + } else { + magic1 = 0x7ff; + magic2 = 0x00087e67; + } + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */ + xf_emit(ctx, 1, 4); /* 00000007 
FP_CONTROL */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */ + xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */ + xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */ + } else if (device->chipset >= 0xa0) { + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + } else { + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + } + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + } + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 
0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + if (device->chipset == 0x50) + xf_emit(ctx, 1, 0); /* ff */ + else + xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */ + xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */ + xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ + xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */ + xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */ + xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + 
xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ + xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ + xf_emit(ctx, 1, 0); /* 0000ffff */ + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 2, 0); /* ffff, ff/3ff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */ + xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */ + xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */ + xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */ + xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + } + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); /* fffffff */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + 
xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */ + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (device->chipset >= 0xa0) { + xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */ + xf_emit(ctx, 1, 0xfac6881); /* fffffff */ + xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */ + xf_emit(ctx, 1, 4); /* 7 */ + xf_emit(ctx, 1, 0); /* 1 */ + xf_emit(ctx, 2, 1); /* 1 */ + xf_emit(ctx, 2, 0); /* 7, f */ + xf_emit(ctx, 1, 1); /* 1 */ + xf_emit(ctx, 1, 0); /* 7/f */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 0x9, 0); /* 1 */ + else + xf_emit(ctx, 0x8, 0); /* 1 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 1); /* 1 */ + xf_emit(ctx, 1, 0x11); /* 7f */ + xf_emit(ctx, 7, 0); /* 7f */ + xf_emit(ctx, 1, 0xfac6881); /* fffffff */ + xf_emit(ctx, 1, 0xf); /* f */ + xf_emit(ctx, 7, 0); /* f */ + xf_emit(ctx, 1, 0x11); /* 7f */ + xf_emit(ctx, 1, 1); /* 1 */ + xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + } + } +} + +static void +nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */ + if (device->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 3 */ + xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */ + if (device->chipset == 0x50) + xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */ + else + xf_emit(ctx, 2, 0); /* 3ff, 1 */ + xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */ + xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */ + xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */ + xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */ + xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */ + xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */ + xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */ + xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */ + if (device->chipset == 0x50) { + xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */ + xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */ + } else if (!IS_NVAAF(device->chipset)) { + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? 
*/ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } else { + xf_emit(ctx, 0x6, 0); + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */ +} + +static void +nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ + xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ + xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ +} + +static void +nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + if (device->chipset < 0xa0) { + nv50_graph_construct_xfer_unk84xx(ctx); + nv50_graph_construct_xfer_tprop(ctx); + nv50_graph_construct_xfer_tex(ctx); + nv50_graph_construct_xfer_unk8cxx(ctx); + } else { + nv50_graph_construct_xfer_tex(ctx); + nv50_graph_construct_xfer_tprop(ctx); + nv50_graph_construct_xfer_unk8cxx(ctx); + nv50_graph_construct_xfer_unk84xx(ctx); + } +} + +static void +nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i, mpcnt = 2; + switch (device->chipset) { + case 0x98: + case 0xaa: + mpcnt = 1; + break; + case 0x50: + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa8: + case 0xac: + mpcnt = 2; + break; + case 0xa0: + case 0xa3: + case 0xa5: + case 0xaf: + mpcnt = 3; + break; + } + for (i = 0; i < mpcnt; i++) { + xf_emit(ctx, 1, 0); /* ff */ + xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ + xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ + xf_emit(ctx, 1, 0x04000400); /* ffffffff */ + if (device->chipset >= 0xa0) + xf_emit(ctx, 1, 
0xc0); /* 00007fff tesla UNK152C */ + xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) { + xf_emit(ctx, 1, 0xe00); /* 7fff */ + xf_emit(ctx, 1, 0x1e00); /* 7fff */ + } + xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (device->chipset == 0x50) + xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */ + xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + if (IS_NVAAF(device->chipset)) + xf_emit(ctx, 0xb, 0); /* RO */ + else if (device->chipset >= 0xa0) + xf_emit(ctx, 0xc, 0); /* RO */ + else + xf_emit(ctx, 0xa, 0); /* RO */ + } + xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + if (device->chipset >= 0xa0) { + xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ + } + xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ + xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ + xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ + xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ + xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ + xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + if (IS_NVA3F(device->chipset)) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + if (IS_NVA3F(device->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f 
IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + } + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ + xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ + /* XXX: demagic this part some day */ + if (device->chipset == 0x50) + xf_emit(ctx, 0x3a0, 0); + else if (device->chipset < 0x94) + xf_emit(ctx, 0x3a2, 0); + else if (device->chipset == 0x98 || device->chipset == 0xaa) + xf_emit(ctx, 0x39f, 0); + else + xf_emit(ctx, 0x3a3, 0); + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 7 OPERATION */ + xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */ + xf_emit(ctx, 0x2d, 0); +} + +static void +nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) +{ + struct nouveau_device *device = ctx->device; + int i; + u32 offset; + u32 units = nv_rd32 (ctx->device, 0x1540); + int size = 0; + + offset = (ctx->ctxvals_pos+0x3f)&~0x3f; + + if (device->chipset < 0xa0) { + for (i = 0; i < 8; i++) { + ctx->ctxvals_pos = offset + i; + /* that little bugger belongs to csched. No idea + * what it's doing here. */ + if (i == 0) + xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ + if (units & (1 << i)) + nv50_graph_construct_xfer_mpc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + } else { + /* Strand 0: TPs 0, 1 */ + ctx->ctxvals_pos = offset; + /* that little bugger belongs to csched. No idea + * what it's doing here. */ + xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ + if (units & (1 << 0)) + nv50_graph_construct_xfer_mpc(ctx); + if (units & (1 << 1)) + nv50_graph_construct_xfer_mpc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 1: TPs 2, 3 */ + ctx->ctxvals_pos = offset + 1; + if (units & (1 << 2)) + nv50_graph_construct_xfer_mpc(ctx); + if (units & (1 << 3)) + nv50_graph_construct_xfer_mpc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 2: TPs 4, 5, 6 */ + ctx->ctxvals_pos = offset + 2; + if (units & (1 << 4)) + nv50_graph_construct_xfer_mpc(ctx); + if (units & (1 << 5)) + nv50_graph_construct_xfer_mpc(ctx); + if (units & (1 << 6)) + nv50_graph_construct_xfer_mpc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 3: TPs 7, 8, 9 */ + ctx->ctxvals_pos = offset + 3; + if (units & (1 << 7)) + nv50_graph_construct_xfer_mpc(ctx); + if (units & (1 << 8)) + nv50_graph_construct_xfer_mpc(ctx); + if (units & (1 << 9)) + nv50_graph_construct_xfer_mpc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + ctx->ctxvals_pos = offset + size * 8; + ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; + cp_lsr (ctx, offset); + cp_out (ctx, CP_SET_XFER_POINTER); + cp_lsr (ctx, size); + cp_out (ctx, CP_SEEK_2); + cp_out (ctx, CP_XFER_2); + cp_wait(ctx, XFER, BUSY); +} diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c new file mode 100644 index 0000000..4cc6269 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c @@ -0,0 +1,3038 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nvc0.h" + +void +nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data) +{ + nv_wr32(priv, 0x400204, data); + nv_wr32(priv, 0x400200, icmd); + while (nv_rd32(priv, 0x400700) & 2) {} +} + +int +nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) +{ + struct nouveau_bar *bar = nouveau_bar(priv); + struct nouveau_gpuobj *chan; + u32 size = (0x80000 + priv->size + 4095) & ~4095; + int ret, i; + + /* allocate memory to for a "channel", which we'll use to generate + * the default context values + */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &info->chan); + chan = info->chan; + if (ret) { + nv_error(priv, "failed to allocate channel memory, %d\n", ret); + return ret; + } + + /* PGD pointer */ + nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000)); + nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000)); + nv_wo32(chan, 0x0208, 0xffffffff); + nv_wo32(chan, 0x020c, 0x000000ff); + + /* PGT[0] pointer */ + nv_wo32(chan, 0x1000, 0x00000000); + nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8); + + /* identity-map the whole "channel" into its own vm */ + for (i = 0; i < size / 4096; i++) { + u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1; + nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr)); + nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr)); + } + + /* context pointer (virt) */ + nv_wo32(chan, 0x0210, 0x00080004); + nv_wo32(chan, 0x0214, 0x00000000); + + bar->flush(bar); + + nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8); + nv_wr32(priv, 0x100cbc, 0x80000001); + nv_wait(priv, 0x100c80, 0x00008000, 0x00008000); + + /* setup default state for mmio list construction */ + info->data = priv->mmio_data; + info->mmio = priv->mmio_list; + info->addr = 0x2000 + (i * 8); + info->priv = priv; + info->buffer_nr = 0; + + if (priv->firmware) { + nv_wr32(priv, 0x409840, 0x00000030); + nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12); + nv_wr32(priv, 0x409504, 0x00000003); + if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010)) + nv_error(priv, "load_ctx timeout\n"); + + nv_wo32(chan, 0x8001c, 1); + nv_wo32(chan, 0x80020, 0); + nv_wo32(chan, 0x80028, 0); + nv_wo32(chan, 0x8002c, 0); + bar->flush(bar); + return 0; + } + + /* HUB_FUC(SET_CHAN) */ + nv_wr32(priv, 0x409840, 0x80000000); + nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12); + nv_wr32(priv, 0x409504, 0x00000001); + if 
(!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) { + nv_error(priv, "HUB_SET_CHAN timeout\n"); + nvc0_graph_ctxctl_debug(priv); + nouveau_gpuobj_ref(NULL, &info->chan); + return -EBUSY; + } + + return 0; +} + +void +nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access) +{ + info->buffer[info->buffer_nr] = info->addr; + info->buffer[info->buffer_nr] += (align - 1); + info->buffer[info->buffer_nr] &= ~(align - 1); + info->addr = info->buffer[info->buffer_nr++] + size; + + info->data->size = size; + info->data->align = align; + info->data->access = access; + info->data++; +} + +void +nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf) +{ + struct nvc0_graph_priv *priv = info->priv; + + info->mmio->addr = addr; + info->mmio->data = data; + info->mmio->shift = shift; + info->mmio->buffer = buf; + info->mmio++; + + if (shift) + data |= info->buffer[buf] >> shift; + nv_wr32(priv, addr, data); +} + +int +nvc0_grctx_fini(struct nvc0_grctx *info) +{ + struct nvc0_graph_priv *priv = info->priv; + int i; + + /* trigger a context unload by unsetting the "next channel valid" bit + * and faking a context switch interrupt + */ + nv_mask(priv, 0x409b04, 0x80000000, 0x00000000); + nv_wr32(priv, 0x409000, 0x00000100); + if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) { + nv_error(priv, "grctx template channel unload timeout\n"); + return -EBUSY; + } + + priv->data = kmalloc(priv->size, GFP_KERNEL); + if (priv->data) { + for (i = 0; i < priv->size; i += 4) + priv->data[i / 4] = nv_ro32(info->chan, 0x80000 + i); + } + + nouveau_gpuobj_ref(NULL, &info->chan); + return priv->data ? 0 : -ENOMEM; +} + +static void +nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv) +{ + u32 fermi = nvc0_graph_class(priv); + u32 mthd; + + nv_mthd(priv, 0x9097, 0x0800, 0x00000000); + nv_mthd(priv, 0x9097, 0x0840, 0x00000000); + nv_mthd(priv, 0x9097, 0x0880, 0x00000000); + nv_mthd(priv, 0x9097, 0x08c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0900, 0x00000000); + nv_mthd(priv, 0x9097, 0x0940, 0x00000000); + nv_mthd(priv, 0x9097, 0x0980, 0x00000000); + nv_mthd(priv, 0x9097, 0x09c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0804, 0x00000000); + nv_mthd(priv, 0x9097, 0x0844, 0x00000000); + nv_mthd(priv, 0x9097, 0x0884, 0x00000000); + nv_mthd(priv, 0x9097, 0x08c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0904, 0x00000000); + nv_mthd(priv, 0x9097, 0x0944, 0x00000000); + nv_mthd(priv, 0x9097, 0x0984, 0x00000000); + nv_mthd(priv, 0x9097, 0x09c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0808, 0x00000400); + nv_mthd(priv, 0x9097, 0x0848, 0x00000400); + nv_mthd(priv, 0x9097, 0x0888, 0x00000400); + nv_mthd(priv, 0x9097, 0x08c8, 0x00000400); + nv_mthd(priv, 0x9097, 0x0908, 0x00000400); + nv_mthd(priv, 0x9097, 0x0948, 0x00000400); + nv_mthd(priv, 0x9097, 0x0988, 0x00000400); + nv_mthd(priv, 0x9097, 0x09c8, 0x00000400); + nv_mthd(priv, 0x9097, 0x080c, 0x00000300); + nv_mthd(priv, 0x9097, 0x084c, 0x00000300); + nv_mthd(priv, 0x9097, 0x088c, 0x00000300); + nv_mthd(priv, 0x9097, 0x08cc, 0x00000300); + nv_mthd(priv, 0x9097, 0x090c, 0x00000300); + nv_mthd(priv, 0x9097, 0x094c, 0x00000300); + nv_mthd(priv, 0x9097, 0x098c, 0x00000300); + nv_mthd(priv, 0x9097, 0x09cc, 0x00000300); + nv_mthd(priv, 0x9097, 0x0810, 0x000000cf); + nv_mthd(priv, 0x9097, 0x0850, 0x00000000); + nv_mthd(priv, 0x9097, 0x0890, 0x00000000); + nv_mthd(priv, 0x9097, 0x08d0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0910, 0x00000000); + nv_mthd(priv, 0x9097, 0x0950, 0x00000000); + nv_mthd(priv, 0x9097, 0x0990, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x09d0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0814, 0x00000040); + nv_mthd(priv, 0x9097, 0x0854, 0x00000040); + nv_mthd(priv, 0x9097, 0x0894, 0x00000040); + nv_mthd(priv, 0x9097, 0x08d4, 0x00000040); + nv_mthd(priv, 0x9097, 0x0914, 0x00000040); + nv_mthd(priv, 0x9097, 0x0954, 0x00000040); + nv_mthd(priv, 0x9097, 0x0994, 0x00000040); + nv_mthd(priv, 0x9097, 0x09d4, 0x00000040); + nv_mthd(priv, 0x9097, 0x0818, 0x00000001); + nv_mthd(priv, 0x9097, 0x0858, 0x00000001); + nv_mthd(priv, 0x9097, 0x0898, 0x00000001); + nv_mthd(priv, 0x9097, 0x08d8, 0x00000001); + nv_mthd(priv, 0x9097, 0x0918, 0x00000001); + nv_mthd(priv, 0x9097, 0x0958, 0x00000001); + nv_mthd(priv, 0x9097, 0x0998, 0x00000001); + nv_mthd(priv, 0x9097, 0x09d8, 0x00000001); + nv_mthd(priv, 0x9097, 0x081c, 0x00000000); + nv_mthd(priv, 0x9097, 0x085c, 0x00000000); + nv_mthd(priv, 0x9097, 0x089c, 0x00000000); + nv_mthd(priv, 0x9097, 0x08dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x091c, 0x00000000); + nv_mthd(priv, 0x9097, 0x095c, 0x00000000); + nv_mthd(priv, 0x9097, 0x099c, 0x00000000); + nv_mthd(priv, 0x9097, 0x09dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0820, 0x00000000); + nv_mthd(priv, 0x9097, 0x0860, 0x00000000); + nv_mthd(priv, 0x9097, 0x08a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x08e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0920, 0x00000000); + nv_mthd(priv, 0x9097, 0x0960, 0x00000000); + nv_mthd(priv, 0x9097, 0x09a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x09e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x2700, 0x00000000); + nv_mthd(priv, 0x9097, 0x2720, 0x00000000); + nv_mthd(priv, 0x9097, 0x2740, 0x00000000); + nv_mthd(priv, 0x9097, 0x2760, 0x00000000); + nv_mthd(priv, 0x9097, 0x2780, 0x00000000); + nv_mthd(priv, 0x9097, 0x27a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x27c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x27e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x2704, 0x00000000); + nv_mthd(priv, 0x9097, 0x2724, 0x00000000); + nv_mthd(priv, 0x9097, 0x2744, 0x00000000); + nv_mthd(priv, 0x9097, 0x2764, 0x00000000); + nv_mthd(priv, 0x9097, 0x2784, 0x00000000); + nv_mthd(priv, 0x9097, 0x27a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x27c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x27e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x2708, 0x00000000); + nv_mthd(priv, 0x9097, 0x2728, 0x00000000); + nv_mthd(priv, 0x9097, 0x2748, 0x00000000); + nv_mthd(priv, 0x9097, 0x2768, 0x00000000); + nv_mthd(priv, 0x9097, 0x2788, 0x00000000); + nv_mthd(priv, 0x9097, 0x27a8, 0x00000000); + nv_mthd(priv, 0x9097, 0x27c8, 0x00000000); + nv_mthd(priv, 0x9097, 0x27e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x270c, 0x00000000); + nv_mthd(priv, 0x9097, 0x272c, 0x00000000); + nv_mthd(priv, 0x9097, 0x274c, 0x00000000); + nv_mthd(priv, 0x9097, 0x276c, 0x00000000); + nv_mthd(priv, 0x9097, 0x278c, 0x00000000); + nv_mthd(priv, 0x9097, 0x27ac, 0x00000000); + nv_mthd(priv, 0x9097, 0x27cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x27ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x2710, 0x00014000); + nv_mthd(priv, 0x9097, 0x2730, 0x00014000); + nv_mthd(priv, 0x9097, 0x2750, 0x00014000); + nv_mthd(priv, 0x9097, 0x2770, 0x00014000); + nv_mthd(priv, 0x9097, 0x2790, 0x00014000); + nv_mthd(priv, 0x9097, 0x27b0, 0x00014000); + nv_mthd(priv, 0x9097, 0x27d0, 0x00014000); + nv_mthd(priv, 0x9097, 0x27f0, 0x00014000); + nv_mthd(priv, 0x9097, 0x2714, 0x00000040); + nv_mthd(priv, 0x9097, 0x2734, 0x00000040); + nv_mthd(priv, 0x9097, 0x2754, 0x00000040); + nv_mthd(priv, 0x9097, 0x2774, 0x00000040); + nv_mthd(priv, 0x9097, 0x2794, 0x00000040); + nv_mthd(priv, 0x9097, 0x27b4, 0x00000040); + 
nv_mthd(priv, 0x9097, 0x27d4, 0x00000040); + nv_mthd(priv, 0x9097, 0x27f4, 0x00000040); + nv_mthd(priv, 0x9097, 0x1c00, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c10, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c20, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c30, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c40, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c50, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c60, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c70, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c80, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c90, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ca0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cb0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cc0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cd0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ce0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cf0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c04, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c14, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c24, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c34, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c44, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c54, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c64, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c74, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c84, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c94, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ca4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cb4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cc4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cd4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ce4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cf4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c08, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c18, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c28, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c38, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c48, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c58, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c68, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c78, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c88, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c98, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ca8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cb8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cc8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cd8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ce8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cf8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c0c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c1c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c2c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c3c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c4c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c5c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c6c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c7c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1c9c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cac, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cbc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ccc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cdc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cec, 0x00000000); + nv_mthd(priv, 0x9097, 0x1cfc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d00, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d10, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d20, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d30, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d40, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d50, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d60, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d70, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d80, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d90, 0x00000000); + nv_mthd(priv, 0x9097, 0x1da0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1db0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dc0, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x1dd0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1de0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1df0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d04, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d14, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d24, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d34, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d44, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d54, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d64, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d74, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d84, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d94, 0x00000000); + nv_mthd(priv, 0x9097, 0x1da4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1db4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dc4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dd4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1de4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1df4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d08, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d18, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d28, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d38, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d48, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d58, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d68, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d78, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d88, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d98, 0x00000000); + nv_mthd(priv, 0x9097, 0x1da8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1db8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dc8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dd8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1de8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1df8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d0c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d1c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d2c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d3c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d4c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d5c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d6c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d7c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1d9c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dac, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dbc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dcc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ddc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dec, 0x00000000); + nv_mthd(priv, 0x9097, 0x1dfc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f00, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f08, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f10, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f18, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f20, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f28, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f30, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f38, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f40, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f48, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f50, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f58, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f60, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f68, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f70, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f78, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f04, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f0c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f14, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f1c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f24, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f2c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f34, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f3c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f44, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f4c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f54, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f5c, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x1f64, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f6c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f74, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f7c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f80, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f88, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f90, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f98, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fa0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fa8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fb0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fb8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fc0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fc8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fd0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fd8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fe0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fe8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ff0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ff8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f84, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f94, 0x00000000); + nv_mthd(priv, 0x9097, 0x1f9c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fa4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fac, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fb4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fbc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fc4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fcc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fd4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fdc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fe4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1fec, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ff4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1ffc, 0x00000000); + nv_mthd(priv, 0x9097, 0x2200, 0x00000022); + nv_mthd(priv, 0x9097, 0x2210, 0x00000022); + nv_mthd(priv, 0x9097, 0x2220, 0x00000022); + nv_mthd(priv, 0x9097, 0x2230, 0x00000022); + nv_mthd(priv, 0x9097, 0x2240, 0x00000022); + nv_mthd(priv, 0x9097, 0x2000, 0x00000000); + nv_mthd(priv, 0x9097, 0x2040, 0x00000011); + nv_mthd(priv, 0x9097, 0x2080, 0x00000020); + nv_mthd(priv, 0x9097, 0x20c0, 0x00000030); + nv_mthd(priv, 0x9097, 0x2100, 0x00000040); + nv_mthd(priv, 0x9097, 0x2140, 0x00000051); + nv_mthd(priv, 0x9097, 0x200c, 0x00000001); + nv_mthd(priv, 0x9097, 0x204c, 0x00000001); + nv_mthd(priv, 0x9097, 0x208c, 0x00000001); + nv_mthd(priv, 0x9097, 0x20cc, 0x00000001); + nv_mthd(priv, 0x9097, 0x210c, 0x00000001); + nv_mthd(priv, 0x9097, 0x214c, 0x00000001); + nv_mthd(priv, 0x9097, 0x2010, 0x00000000); + nv_mthd(priv, 0x9097, 0x2050, 0x00000000); + nv_mthd(priv, 0x9097, 0x2090, 0x00000001); + nv_mthd(priv, 0x9097, 0x20d0, 0x00000002); + nv_mthd(priv, 0x9097, 0x2110, 0x00000003); + nv_mthd(priv, 0x9097, 0x2150, 0x00000004); + nv_mthd(priv, 0x9097, 0x0380, 0x00000000); + nv_mthd(priv, 0x9097, 0x03a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x03c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x03e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0384, 0x00000000); + nv_mthd(priv, 0x9097, 0x03a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x03c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x03e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0388, 0x00000000); + nv_mthd(priv, 0x9097, 0x03a8, 0x00000000); + nv_mthd(priv, 0x9097, 0x03c8, 0x00000000); + nv_mthd(priv, 0x9097, 0x03e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x038c, 0x00000000); + nv_mthd(priv, 0x9097, 0x03ac, 0x00000000); + nv_mthd(priv, 0x9097, 0x03cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x03ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x0700, 0x00000000); + nv_mthd(priv, 0x9097, 0x0710, 0x00000000); + nv_mthd(priv, 0x9097, 0x0720, 0x00000000); + nv_mthd(priv, 0x9097, 0x0730, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x0704, 0x00000000); + nv_mthd(priv, 0x9097, 0x0714, 0x00000000); + nv_mthd(priv, 0x9097, 0x0724, 0x00000000); + nv_mthd(priv, 0x9097, 0x0734, 0x00000000); + nv_mthd(priv, 0x9097, 0x0708, 0x00000000); + nv_mthd(priv, 0x9097, 0x0718, 0x00000000); + nv_mthd(priv, 0x9097, 0x0728, 0x00000000); + nv_mthd(priv, 0x9097, 0x0738, 0x00000000); + nv_mthd(priv, 0x9097, 0x2800, 0x00000000); + nv_mthd(priv, 0x9097, 0x2804, 0x00000000); + nv_mthd(priv, 0x9097, 0x2808, 0x00000000); + nv_mthd(priv, 0x9097, 0x280c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2810, 0x00000000); + nv_mthd(priv, 0x9097, 0x2814, 0x00000000); + nv_mthd(priv, 0x9097, 0x2818, 0x00000000); + nv_mthd(priv, 0x9097, 0x281c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2820, 0x00000000); + nv_mthd(priv, 0x9097, 0x2824, 0x00000000); + nv_mthd(priv, 0x9097, 0x2828, 0x00000000); + nv_mthd(priv, 0x9097, 0x282c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2830, 0x00000000); + nv_mthd(priv, 0x9097, 0x2834, 0x00000000); + nv_mthd(priv, 0x9097, 0x2838, 0x00000000); + nv_mthd(priv, 0x9097, 0x283c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2840, 0x00000000); + nv_mthd(priv, 0x9097, 0x2844, 0x00000000); + nv_mthd(priv, 0x9097, 0x2848, 0x00000000); + nv_mthd(priv, 0x9097, 0x284c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2850, 0x00000000); + nv_mthd(priv, 0x9097, 0x2854, 0x00000000); + nv_mthd(priv, 0x9097, 0x2858, 0x00000000); + nv_mthd(priv, 0x9097, 0x285c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2860, 0x00000000); + nv_mthd(priv, 0x9097, 0x2864, 0x00000000); + nv_mthd(priv, 0x9097, 0x2868, 0x00000000); + nv_mthd(priv, 0x9097, 0x286c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2870, 0x00000000); + nv_mthd(priv, 0x9097, 0x2874, 0x00000000); + nv_mthd(priv, 0x9097, 0x2878, 0x00000000); + nv_mthd(priv, 0x9097, 0x287c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2880, 0x00000000); + nv_mthd(priv, 0x9097, 0x2884, 0x00000000); + nv_mthd(priv, 0x9097, 0x2888, 0x00000000); + nv_mthd(priv, 0x9097, 0x288c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2890, 0x00000000); + nv_mthd(priv, 0x9097, 0x2894, 0x00000000); + nv_mthd(priv, 0x9097, 0x2898, 0x00000000); + nv_mthd(priv, 0x9097, 0x289c, 0x00000000); + nv_mthd(priv, 0x9097, 0x28a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x28a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x28a8, 0x00000000); + nv_mthd(priv, 0x9097, 0x28ac, 0x00000000); + nv_mthd(priv, 0x9097, 0x28b0, 0x00000000); + nv_mthd(priv, 0x9097, 0x28b4, 0x00000000); + nv_mthd(priv, 0x9097, 0x28b8, 0x00000000); + nv_mthd(priv, 0x9097, 0x28bc, 0x00000000); + nv_mthd(priv, 0x9097, 0x28c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x28c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x28c8, 0x00000000); + nv_mthd(priv, 0x9097, 0x28cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x28d0, 0x00000000); + nv_mthd(priv, 0x9097, 0x28d4, 0x00000000); + nv_mthd(priv, 0x9097, 0x28d8, 0x00000000); + nv_mthd(priv, 0x9097, 0x28dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x28e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x28e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x28e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x28ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x28f0, 0x00000000); + nv_mthd(priv, 0x9097, 0x28f4, 0x00000000); + nv_mthd(priv, 0x9097, 0x28f8, 0x00000000); + nv_mthd(priv, 0x9097, 0x28fc, 0x00000000); + nv_mthd(priv, 0x9097, 0x2900, 0x00000000); + nv_mthd(priv, 0x9097, 0x2904, 0x00000000); + nv_mthd(priv, 0x9097, 0x2908, 0x00000000); + nv_mthd(priv, 0x9097, 0x290c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2910, 0x00000000); + nv_mthd(priv, 0x9097, 0x2914, 0x00000000); + nv_mthd(priv, 0x9097, 0x2918, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x291c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2920, 0x00000000); + nv_mthd(priv, 0x9097, 0x2924, 0x00000000); + nv_mthd(priv, 0x9097, 0x2928, 0x00000000); + nv_mthd(priv, 0x9097, 0x292c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2930, 0x00000000); + nv_mthd(priv, 0x9097, 0x2934, 0x00000000); + nv_mthd(priv, 0x9097, 0x2938, 0x00000000); + nv_mthd(priv, 0x9097, 0x293c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2940, 0x00000000); + nv_mthd(priv, 0x9097, 0x2944, 0x00000000); + nv_mthd(priv, 0x9097, 0x2948, 0x00000000); + nv_mthd(priv, 0x9097, 0x294c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2950, 0x00000000); + nv_mthd(priv, 0x9097, 0x2954, 0x00000000); + nv_mthd(priv, 0x9097, 0x2958, 0x00000000); + nv_mthd(priv, 0x9097, 0x295c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2960, 0x00000000); + nv_mthd(priv, 0x9097, 0x2964, 0x00000000); + nv_mthd(priv, 0x9097, 0x2968, 0x00000000); + nv_mthd(priv, 0x9097, 0x296c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2970, 0x00000000); + nv_mthd(priv, 0x9097, 0x2974, 0x00000000); + nv_mthd(priv, 0x9097, 0x2978, 0x00000000); + nv_mthd(priv, 0x9097, 0x297c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2980, 0x00000000); + nv_mthd(priv, 0x9097, 0x2984, 0x00000000); + nv_mthd(priv, 0x9097, 0x2988, 0x00000000); + nv_mthd(priv, 0x9097, 0x298c, 0x00000000); + nv_mthd(priv, 0x9097, 0x2990, 0x00000000); + nv_mthd(priv, 0x9097, 0x2994, 0x00000000); + nv_mthd(priv, 0x9097, 0x2998, 0x00000000); + nv_mthd(priv, 0x9097, 0x299c, 0x00000000); + nv_mthd(priv, 0x9097, 0x29a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x29a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x29a8, 0x00000000); + nv_mthd(priv, 0x9097, 0x29ac, 0x00000000); + nv_mthd(priv, 0x9097, 0x29b0, 0x00000000); + nv_mthd(priv, 0x9097, 0x29b4, 0x00000000); + nv_mthd(priv, 0x9097, 0x29b8, 0x00000000); + nv_mthd(priv, 0x9097, 0x29bc, 0x00000000); + nv_mthd(priv, 0x9097, 0x29c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x29c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x29c8, 0x00000000); + nv_mthd(priv, 0x9097, 0x29cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x29d0, 0x00000000); + nv_mthd(priv, 0x9097, 0x29d4, 0x00000000); + nv_mthd(priv, 0x9097, 0x29d8, 0x00000000); + nv_mthd(priv, 0x9097, 0x29dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x29e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x29e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x29e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x29ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x29f0, 0x00000000); + nv_mthd(priv, 0x9097, 0x29f4, 0x00000000); + nv_mthd(priv, 0x9097, 0x29f8, 0x00000000); + nv_mthd(priv, 0x9097, 0x29fc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a00, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a20, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a40, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a60, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a80, 0x00000000); + nv_mthd(priv, 0x9097, 0x0aa0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ac0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ae0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b00, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b20, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b40, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b60, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b80, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ba0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bc0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0be0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a04, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a24, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a44, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a64, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a84, 0x00000000); + nv_mthd(priv, 0x9097, 0x0aa4, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x0ac4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ae4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b04, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b24, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b44, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b64, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b84, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ba4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bc4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0be4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a08, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a28, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a48, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a68, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a88, 0x00000000); + nv_mthd(priv, 0x9097, 0x0aa8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ac8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ae8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b08, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b28, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b48, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b68, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b88, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ba8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bc8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0be8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a0c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a2c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a4c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a6c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0aac, 0x00000000); + nv_mthd(priv, 0x9097, 0x0acc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0aec, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b0c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b2c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b4c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b6c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bac, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bcc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bec, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a10, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a30, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a50, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a70, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a90, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ab0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ad0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0af0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b10, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b30, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b50, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b70, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b90, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bb0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bd0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bf0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a14, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a34, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a54, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a74, 0x00000000); + nv_mthd(priv, 0x9097, 0x0a94, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ab4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ad4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0af4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b14, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b34, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b54, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b74, 0x00000000); + nv_mthd(priv, 0x9097, 0x0b94, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bb4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bd4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0bf4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c00, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c10, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c20, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c30, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c40, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x0c50, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c60, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c70, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c80, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c90, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ca0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cb0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cc0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cd0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ce0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cf0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c04, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c14, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c24, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c34, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c44, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c54, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c64, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c74, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c84, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c94, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ca4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cb4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cc4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cd4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ce4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cf4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c08, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c18, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c28, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c38, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c48, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c58, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c68, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c78, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c88, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c98, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ca8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cb8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cc8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cd8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ce8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0cf8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0c0c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c1c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c2c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c3c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c4c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c5c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c6c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c7c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c8c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0c9c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0cac, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0cbc, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0ccc, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0cdc, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0cec, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0cfc, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0d00, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d08, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d10, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d18, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d20, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d28, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d30, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d38, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d04, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d0c, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d14, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d1c, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d24, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d2c, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d34, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d3c, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e00, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e10, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e20, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e30, 0x00000000); + 
nv_mthd(priv, 0x9097, 0x0e40, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e50, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e60, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e70, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e80, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e90, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ea0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0eb0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ec0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ed0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ee0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ef0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0e04, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e14, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e24, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e34, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e44, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e54, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e64, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e74, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e84, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e94, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ea4, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0eb4, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ec4, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ed4, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ee4, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ef4, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e08, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e18, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e28, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e38, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e48, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e58, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e68, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e78, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e88, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0e98, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ea8, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0eb8, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ec8, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ed8, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ee8, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0ef8, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d40, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d48, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d50, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d58, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d44, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d4c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d54, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d5c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1e00, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e20, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e40, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e60, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e80, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ea0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ec0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ee0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e04, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e24, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e44, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e64, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e84, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ea4, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ec4, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ee4, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e08, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e28, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e48, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e68, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e88, 0x00000002); + nv_mthd(priv, 0x9097, 0x1ea8, 0x00000002); + nv_mthd(priv, 0x9097, 0x1ec8, 0x00000002); + nv_mthd(priv, 0x9097, 0x1ee8, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e0c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e2c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e4c, 0x00000001); + 
nv_mthd(priv, 0x9097, 0x1e6c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e8c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1eac, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ecc, 0x00000001); + nv_mthd(priv, 0x9097, 0x1eec, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e10, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e30, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e50, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e70, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e90, 0x00000001); + nv_mthd(priv, 0x9097, 0x1eb0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ed0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ef0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e14, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e34, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e54, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e74, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e94, 0x00000002); + nv_mthd(priv, 0x9097, 0x1eb4, 0x00000002); + nv_mthd(priv, 0x9097, 0x1ed4, 0x00000002); + nv_mthd(priv, 0x9097, 0x1ef4, 0x00000002); + nv_mthd(priv, 0x9097, 0x1e18, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e38, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e58, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e78, 0x00000001); + nv_mthd(priv, 0x9097, 0x1e98, 0x00000001); + nv_mthd(priv, 0x9097, 0x1eb8, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ed8, 0x00000001); + nv_mthd(priv, 0x9097, 0x1ef8, 0x00000001); + if (fermi == 0x9097) { + for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4) + nv_mthd(priv, 0x9097, mthd, 0x00000000); + } + nv_mthd(priv, 0x9097, 0x030c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1944, 0x00000000); + nv_mthd(priv, 0x9097, 0x1514, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d68, 0x0000ffff); + nv_mthd(priv, 0x9097, 0x121c, 0x0fac6881); + nv_mthd(priv, 0x9097, 0x0fac, 0x00000001); + nv_mthd(priv, 0x9097, 0x1538, 0x00000001); + nv_mthd(priv, 0x9097, 0x0fe0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0fe4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0fe8, 0x00000014); + nv_mthd(priv, 0x9097, 0x0fec, 0x00000040); + nv_mthd(priv, 0x9097, 0x0ff0, 0x00000000); + nv_mthd(priv, 0x9097, 0x179c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1228, 0x00000400); + nv_mthd(priv, 0x9097, 0x122c, 0x00000300); + nv_mthd(priv, 0x9097, 0x1230, 0x00010001); + nv_mthd(priv, 0x9097, 0x07f8, 0x00000000); + nv_mthd(priv, 0x9097, 0x15b4, 0x00000001); + nv_mthd(priv, 0x9097, 0x15cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1534, 0x00000000); + nv_mthd(priv, 0x9097, 0x0fb0, 0x00000000); + nv_mthd(priv, 0x9097, 0x15d0, 0x00000000); + nv_mthd(priv, 0x9097, 0x153c, 0x00000000); + nv_mthd(priv, 0x9097, 0x16b4, 0x00000003); + nv_mthd(priv, 0x9097, 0x0fbc, 0x0000ffff); + nv_mthd(priv, 0x9097, 0x0fc0, 0x0000ffff); + nv_mthd(priv, 0x9097, 0x0fc4, 0x0000ffff); + nv_mthd(priv, 0x9097, 0x0fc8, 0x0000ffff); + nv_mthd(priv, 0x9097, 0x0df8, 0x00000000); + nv_mthd(priv, 0x9097, 0x0dfc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1948, 0x00000000); + nv_mthd(priv, 0x9097, 0x1970, 0x00000001); + nv_mthd(priv, 0x9097, 0x161c, 0x000009f0); + nv_mthd(priv, 0x9097, 0x0dcc, 0x00000010); + nv_mthd(priv, 0x9097, 0x163c, 0x00000000); + nv_mthd(priv, 0x9097, 0x15e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1160, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1164, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1168, 0x25e00040); + nv_mthd(priv, 0x9097, 0x116c, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1170, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1174, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1178, 0x25e00040); + nv_mthd(priv, 0x9097, 0x117c, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1180, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1184, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1188, 0x25e00040); + nv_mthd(priv, 
0x9097, 0x118c, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1190, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1194, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1198, 0x25e00040); + nv_mthd(priv, 0x9097, 0x119c, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11a0, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11a4, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11a8, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11ac, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11b0, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11b4, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11b8, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11bc, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11c0, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11c4, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11c8, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11cc, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11d0, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11d4, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11d8, 0x25e00040); + nv_mthd(priv, 0x9097, 0x11dc, 0x25e00040); + nv_mthd(priv, 0x9097, 0x1880, 0x00000000); + nv_mthd(priv, 0x9097, 0x1884, 0x00000000); + nv_mthd(priv, 0x9097, 0x1888, 0x00000000); + nv_mthd(priv, 0x9097, 0x188c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1890, 0x00000000); + nv_mthd(priv, 0x9097, 0x1894, 0x00000000); + nv_mthd(priv, 0x9097, 0x1898, 0x00000000); + nv_mthd(priv, 0x9097, 0x189c, 0x00000000); + nv_mthd(priv, 0x9097, 0x18a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x18a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x18a8, 0x00000000); + nv_mthd(priv, 0x9097, 0x18ac, 0x00000000); + nv_mthd(priv, 0x9097, 0x18b0, 0x00000000); + nv_mthd(priv, 0x9097, 0x18b4, 0x00000000); + nv_mthd(priv, 0x9097, 0x18b8, 0x00000000); + nv_mthd(priv, 0x9097, 0x18bc, 0x00000000); + nv_mthd(priv, 0x9097, 0x18c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x18c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x18c8, 0x00000000); + nv_mthd(priv, 0x9097, 0x18cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x18d0, 0x00000000); + nv_mthd(priv, 0x9097, 0x18d4, 0x00000000); + nv_mthd(priv, 0x9097, 0x18d8, 0x00000000); + nv_mthd(priv, 0x9097, 0x18dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x18e0, 0x00000000); + nv_mthd(priv, 0x9097, 0x18e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x18e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x18ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x18f0, 0x00000000); + nv_mthd(priv, 0x9097, 0x18f4, 0x00000000); + nv_mthd(priv, 0x9097, 0x18f8, 0x00000000); + nv_mthd(priv, 0x9097, 0x18fc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f84, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f88, 0x00000000); + nv_mthd(priv, 0x9097, 0x17c8, 0x00000000); + nv_mthd(priv, 0x9097, 0x17cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x17d0, 0x000000ff); + nv_mthd(priv, 0x9097, 0x17d4, 0xffffffff); + nv_mthd(priv, 0x9097, 0x17d8, 0x00000002); + nv_mthd(priv, 0x9097, 0x17dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x15f4, 0x00000000); + nv_mthd(priv, 0x9097, 0x15f8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1434, 0x00000000); + nv_mthd(priv, 0x9097, 0x1438, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d74, 0x00000000); + nv_mthd(priv, 0x9097, 0x0dec, 0x00000001); + nv_mthd(priv, 0x9097, 0x13a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1318, 0x00000001); + nv_mthd(priv, 0x9097, 0x1644, 0x00000000); + nv_mthd(priv, 0x9097, 0x0748, 0x00000000); + nv_mthd(priv, 0x9097, 0x0de8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1648, 0x00000000); + nv_mthd(priv, 0x9097, 0x12a4, 0x00000000); + nv_mthd(priv, 0x9097, 0x1120, 0x00000000); + nv_mthd(priv, 0x9097, 0x1124, 0x00000000); + nv_mthd(priv, 0x9097, 0x1128, 0x00000000); + nv_mthd(priv, 0x9097, 0x112c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1118, 0x00000000); + nv_mthd(priv, 
0x9097, 0x164c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1658, 0x00000000); + nv_mthd(priv, 0x9097, 0x1910, 0x00000290); + nv_mthd(priv, 0x9097, 0x1518, 0x00000000); + nv_mthd(priv, 0x9097, 0x165c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1520, 0x00000000); + nv_mthd(priv, 0x9097, 0x1604, 0x00000000); + nv_mthd(priv, 0x9097, 0x1570, 0x00000000); + nv_mthd(priv, 0x9097, 0x13b0, 0x3f800000); + nv_mthd(priv, 0x9097, 0x13b4, 0x3f800000); + nv_mthd(priv, 0x9097, 0x020c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1670, 0x30201000); + nv_mthd(priv, 0x9097, 0x1674, 0x70605040); + nv_mthd(priv, 0x9097, 0x1678, 0xb8a89888); + nv_mthd(priv, 0x9097, 0x167c, 0xf8e8d8c8); + nv_mthd(priv, 0x9097, 0x166c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1680, 0x00ffff00); + nv_mthd(priv, 0x9097, 0x12d0, 0x00000003); + nv_mthd(priv, 0x9097, 0x12d4, 0x00000002); + nv_mthd(priv, 0x9097, 0x1684, 0x00000000); + nv_mthd(priv, 0x9097, 0x1688, 0x00000000); + nv_mthd(priv, 0x9097, 0x0dac, 0x00001b02); + nv_mthd(priv, 0x9097, 0x0db0, 0x00001b02); + nv_mthd(priv, 0x9097, 0x0db4, 0x00000000); + nv_mthd(priv, 0x9097, 0x168c, 0x00000000); + nv_mthd(priv, 0x9097, 0x15bc, 0x00000000); + nv_mthd(priv, 0x9097, 0x156c, 0x00000000); + nv_mthd(priv, 0x9097, 0x187c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1110, 0x00000001); + nv_mthd(priv, 0x9097, 0x0dc0, 0x00000000); + nv_mthd(priv, 0x9097, 0x0dc4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0dc8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1234, 0x00000000); + nv_mthd(priv, 0x9097, 0x1690, 0x00000000); + nv_mthd(priv, 0x9097, 0x12ac, 0x00000001); + nv_mthd(priv, 0x9097, 0x02c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0790, 0x00000000); + nv_mthd(priv, 0x9097, 0x0794, 0x00000000); + nv_mthd(priv, 0x9097, 0x0798, 0x00000000); + nv_mthd(priv, 0x9097, 0x079c, 0x00000000); + nv_mthd(priv, 0x9097, 0x07a0, 0x00000000); + nv_mthd(priv, 0x9097, 0x077c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1000, 0x00000010); + nv_mthd(priv, 0x9097, 0x10fc, 0x00000000); + nv_mthd(priv, 0x9097, 0x1290, 0x00000000); + nv_mthd(priv, 0x9097, 0x0218, 0x00000010); + nv_mthd(priv, 0x9097, 0x12d8, 0x00000000); + nv_mthd(priv, 0x9097, 0x12dc, 0x00000010); + nv_mthd(priv, 0x9097, 0x0d94, 0x00000001); + nv_mthd(priv, 0x9097, 0x155c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1560, 0x00000000); + nv_mthd(priv, 0x9097, 0x1564, 0x00001fff); + nv_mthd(priv, 0x9097, 0x1574, 0x00000000); + nv_mthd(priv, 0x9097, 0x1578, 0x00000000); + nv_mthd(priv, 0x9097, 0x157c, 0x003fffff); + nv_mthd(priv, 0x9097, 0x1354, 0x00000000); + nv_mthd(priv, 0x9097, 0x1664, 0x00000000); + nv_mthd(priv, 0x9097, 0x1610, 0x00000012); + nv_mthd(priv, 0x9097, 0x1608, 0x00000000); + nv_mthd(priv, 0x9097, 0x160c, 0x00000000); + nv_mthd(priv, 0x9097, 0x162c, 0x00000003); + nv_mthd(priv, 0x9097, 0x0210, 0x00000000); + nv_mthd(priv, 0x9097, 0x0320, 0x00000000); + nv_mthd(priv, 0x9097, 0x0324, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0328, 0x3f800000); + nv_mthd(priv, 0x9097, 0x032c, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0330, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0334, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0338, 0x3f800000); + nv_mthd(priv, 0x9097, 0x0750, 0x00000000); + nv_mthd(priv, 0x9097, 0x0760, 0x39291909); + nv_mthd(priv, 0x9097, 0x0764, 0x79695949); + nv_mthd(priv, 0x9097, 0x0768, 0xb9a99989); + nv_mthd(priv, 0x9097, 0x076c, 0xf9e9d9c9); + nv_mthd(priv, 0x9097, 0x0770, 0x30201000); + nv_mthd(priv, 0x9097, 0x0774, 0x70605040); + nv_mthd(priv, 0x9097, 0x0778, 0x00009080); + nv_mthd(priv, 0x9097, 0x0780, 0x39291909); + nv_mthd(priv, 0x9097, 0x0784, 0x79695949); + nv_mthd(priv, 
0x9097, 0x0788, 0xb9a99989); + nv_mthd(priv, 0x9097, 0x078c, 0xf9e9d9c9); + nv_mthd(priv, 0x9097, 0x07d0, 0x30201000); + nv_mthd(priv, 0x9097, 0x07d4, 0x70605040); + nv_mthd(priv, 0x9097, 0x07d8, 0x00009080); + nv_mthd(priv, 0x9097, 0x037c, 0x00000001); + nv_mthd(priv, 0x9097, 0x0740, 0x00000000); + nv_mthd(priv, 0x9097, 0x0744, 0x00000000); + nv_mthd(priv, 0x9097, 0x2600, 0x00000000); + nv_mthd(priv, 0x9097, 0x1918, 0x00000000); + nv_mthd(priv, 0x9097, 0x191c, 0x00000900); + nv_mthd(priv, 0x9097, 0x1920, 0x00000405); + nv_mthd(priv, 0x9097, 0x1308, 0x00000001); + nv_mthd(priv, 0x9097, 0x1924, 0x00000000); + nv_mthd(priv, 0x9097, 0x13ac, 0x00000000); + nv_mthd(priv, 0x9097, 0x192c, 0x00000001); + nv_mthd(priv, 0x9097, 0x193c, 0x00002c1c); + nv_mthd(priv, 0x9097, 0x0d7c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x02c0, 0x00000001); + nv_mthd(priv, 0x9097, 0x1510, 0x00000000); + nv_mthd(priv, 0x9097, 0x1940, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ff4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0ff8, 0x00000000); + nv_mthd(priv, 0x9097, 0x194c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1950, 0x00000000); + nv_mthd(priv, 0x9097, 0x1968, 0x00000000); + nv_mthd(priv, 0x9097, 0x1590, 0x0000003f); + nv_mthd(priv, 0x9097, 0x07e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x07ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x07f0, 0x00000000); + nv_mthd(priv, 0x9097, 0x07f4, 0x00000000); + nv_mthd(priv, 0x9097, 0x196c, 0x00000011); + nv_mthd(priv, 0x9097, 0x197c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0fcc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0fd0, 0x00000000); + nv_mthd(priv, 0x9097, 0x02d8, 0x00000040); + nv_mthd(priv, 0x9097, 0x1980, 0x00000080); + nv_mthd(priv, 0x9097, 0x1504, 0x00000080); + nv_mthd(priv, 0x9097, 0x1984, 0x00000000); + nv_mthd(priv, 0x9097, 0x0300, 0x00000001); + nv_mthd(priv, 0x9097, 0x13a8, 0x00000000); + nv_mthd(priv, 0x9097, 0x12ec, 0x00000000); + nv_mthd(priv, 0x9097, 0x1310, 0x00000000); + nv_mthd(priv, 0x9097, 0x1314, 0x00000001); + nv_mthd(priv, 0x9097, 0x1380, 0x00000000); + nv_mthd(priv, 0x9097, 0x1384, 0x00000001); + nv_mthd(priv, 0x9097, 0x1388, 0x00000001); + nv_mthd(priv, 0x9097, 0x138c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1390, 0x00000001); + nv_mthd(priv, 0x9097, 0x1394, 0x00000000); + nv_mthd(priv, 0x9097, 0x139c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1398, 0x00000000); + nv_mthd(priv, 0x9097, 0x1594, 0x00000000); + nv_mthd(priv, 0x9097, 0x1598, 0x00000001); + nv_mthd(priv, 0x9097, 0x159c, 0x00000001); + nv_mthd(priv, 0x9097, 0x15a0, 0x00000001); + nv_mthd(priv, 0x9097, 0x15a4, 0x00000001); + nv_mthd(priv, 0x9097, 0x0f54, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f58, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f5c, 0x00000000); + nv_mthd(priv, 0x9097, 0x19bc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f9c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0fa0, 0x00000000); + nv_mthd(priv, 0x9097, 0x12cc, 0x00000000); + nv_mthd(priv, 0x9097, 0x12e8, 0x00000000); + nv_mthd(priv, 0x9097, 0x130c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1360, 0x00000000); + nv_mthd(priv, 0x9097, 0x1364, 0x00000000); + nv_mthd(priv, 0x9097, 0x1368, 0x00000000); + nv_mthd(priv, 0x9097, 0x136c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1370, 0x00000000); + nv_mthd(priv, 0x9097, 0x1374, 0x00000000); + nv_mthd(priv, 0x9097, 0x1378, 0x00000000); + nv_mthd(priv, 0x9097, 0x137c, 0x00000000); + nv_mthd(priv, 0x9097, 0x133c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1340, 0x00000001); + nv_mthd(priv, 0x9097, 0x1344, 0x00000002); + nv_mthd(priv, 0x9097, 0x1348, 0x00000001); + nv_mthd(priv, 
0x9097, 0x134c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1350, 0x00000002); + nv_mthd(priv, 0x9097, 0x1358, 0x00000001); + nv_mthd(priv, 0x9097, 0x12e4, 0x00000000); + nv_mthd(priv, 0x9097, 0x131c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1320, 0x00000000); + nv_mthd(priv, 0x9097, 0x1324, 0x00000000); + nv_mthd(priv, 0x9097, 0x1328, 0x00000000); + nv_mthd(priv, 0x9097, 0x19c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1140, 0x00000000); + nv_mthd(priv, 0x9097, 0x19c4, 0x00000000); + nv_mthd(priv, 0x9097, 0x19c8, 0x00001500); + nv_mthd(priv, 0x9097, 0x135c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f90, 0x00000000); + nv_mthd(priv, 0x9097, 0x19e0, 0x00000001); + nv_mthd(priv, 0x9097, 0x19e4, 0x00000001); + nv_mthd(priv, 0x9097, 0x19e8, 0x00000001); + nv_mthd(priv, 0x9097, 0x19ec, 0x00000001); + nv_mthd(priv, 0x9097, 0x19f0, 0x00000001); + nv_mthd(priv, 0x9097, 0x19f4, 0x00000001); + nv_mthd(priv, 0x9097, 0x19f8, 0x00000001); + nv_mthd(priv, 0x9097, 0x19fc, 0x00000001); + nv_mthd(priv, 0x9097, 0x19cc, 0x00000001); + nv_mthd(priv, 0x9097, 0x15b8, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a00, 0x00001111); + nv_mthd(priv, 0x9097, 0x1a04, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a08, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a0c, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a10, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a14, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a18, 0x00000000); + nv_mthd(priv, 0x9097, 0x1a1c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d6c, 0xffff0000); + nv_mthd(priv, 0x9097, 0x0d70, 0xffff0000); + nv_mthd(priv, 0x9097, 0x10f8, 0x00001010); + nv_mthd(priv, 0x9097, 0x0d80, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d84, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d88, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d8c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0d90, 0x00000000); + nv_mthd(priv, 0x9097, 0x0da0, 0x00000000); + nv_mthd(priv, 0x9097, 0x1508, 0x80000000); + nv_mthd(priv, 0x9097, 0x150c, 0x40000000); + nv_mthd(priv, 0x9097, 0x1668, 0x00000000); + nv_mthd(priv, 0x9097, 0x0318, 0x00000008); + nv_mthd(priv, 0x9097, 0x031c, 0x00000008); + nv_mthd(priv, 0x9097, 0x0d9c, 0x00000001); + nv_mthd(priv, 0x9097, 0x07dc, 0x00000000); + nv_mthd(priv, 0x9097, 0x074c, 0x00000055); + nv_mthd(priv, 0x9097, 0x1420, 0x00000003); + nv_mthd(priv, 0x9097, 0x17bc, 0x00000000); + nv_mthd(priv, 0x9097, 0x17c0, 0x00000000); + nv_mthd(priv, 0x9097, 0x17c4, 0x00000001); + nv_mthd(priv, 0x9097, 0x1008, 0x00000008); + nv_mthd(priv, 0x9097, 0x100c, 0x00000040); + nv_mthd(priv, 0x9097, 0x1010, 0x0000012c); + nv_mthd(priv, 0x9097, 0x0d60, 0x00000040); + nv_mthd(priv, 0x9097, 0x075c, 0x00000003); + nv_mthd(priv, 0x9097, 0x1018, 0x00000020); + nv_mthd(priv, 0x9097, 0x101c, 0x00000001); + nv_mthd(priv, 0x9097, 0x1020, 0x00000020); + nv_mthd(priv, 0x9097, 0x1024, 0x00000001); + nv_mthd(priv, 0x9097, 0x1444, 0x00000000); + nv_mthd(priv, 0x9097, 0x1448, 0x00000000); + nv_mthd(priv, 0x9097, 0x144c, 0x00000000); + nv_mthd(priv, 0x9097, 0x0360, 0x20164010); + nv_mthd(priv, 0x9097, 0x0364, 0x00000020); + nv_mthd(priv, 0x9097, 0x0368, 0x00000000); + nv_mthd(priv, 0x9097, 0x0de4, 0x00000000); + nv_mthd(priv, 0x9097, 0x0204, 0x00000006); + nv_mthd(priv, 0x9097, 0x0208, 0x00000000); + nv_mthd(priv, 0x9097, 0x02cc, 0x003fffff); + nv_mthd(priv, 0x9097, 0x02d0, 0x00000c48); + nv_mthd(priv, 0x9097, 0x1220, 0x00000005); + nv_mthd(priv, 0x9097, 0x0fdc, 0x00000000); + nv_mthd(priv, 0x9097, 0x0f98, 0x00300008); + nv_mthd(priv, 0x9097, 0x1284, 0x04000080); + nv_mthd(priv, 0x9097, 0x1450, 0x00300008); + nv_mthd(priv, 0x9097, 0x1454, 0x04000080); + nv_mthd(priv, 
0x9097, 0x0214, 0x00000000);
+	/* in trace, right after 0x90c0, not here */
+	nv_mthd(priv, 0x9097, 0x3410, 0x80002006);
+}
+
+static void
+nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	if (fermi == 0x9197) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9197, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9197, 0x02e4, 0x0000b001);
+}
+
+static void
+nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	if (fermi == 0x9297) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9297, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9297, 0x036c, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0370, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x07a4, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x07a8, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0374, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0378, 0x00000020);
+}
+
+static void
+nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+}
+
+static void
+nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0314, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0320, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0238, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x023c, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0318, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x031c, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
+		nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+	}
+	nv_mthd(priv, 0x90c0, 0x270c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x272c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x274c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x276c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x278c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
+	for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
+		nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+		nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+		nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+		nv_mthd(priv, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+	}
+	nv_mthd(priv, 0x90c0, 0x030c, 0x00000001);
+	nv_mthd(priv, 0x90c0, 0x1944, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0758, 0x00000100);
+	nv_mthd(priv, 0x90c0, 0x02c4,
0x00000000); + nv_mthd(priv, 0x90c0, 0x0790, 0x00000000); + nv_mthd(priv, 0x90c0, 0x0794, 0x00000000); + nv_mthd(priv, 0x90c0, 0x0798, 0x00000000); + nv_mthd(priv, 0x90c0, 0x079c, 0x00000000); + nv_mthd(priv, 0x90c0, 0x07a0, 0x00000000); + nv_mthd(priv, 0x90c0, 0x077c, 0x00000000); + nv_mthd(priv, 0x90c0, 0x0204, 0x00000000); + nv_mthd(priv, 0x90c0, 0x0208, 0x00000000); + nv_mthd(priv, 0x90c0, 0x020c, 0x00000000); + nv_mthd(priv, 0x90c0, 0x0214, 0x00000000); + nv_mthd(priv, 0x90c0, 0x024c, 0x00000000); + nv_mthd(priv, 0x90c0, 0x0d94, 0x00000001); + nv_mthd(priv, 0x90c0, 0x1608, 0x00000000); + nv_mthd(priv, 0x90c0, 0x160c, 0x00000000); + nv_mthd(priv, 0x90c0, 0x1664, 0x00000000); +} + +static void +nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv) +{ + int i; + + nv_wr32(priv, 0x404004, 0x00000000); + nv_wr32(priv, 0x404008, 0x00000000); + nv_wr32(priv, 0x40400c, 0x00000000); + nv_wr32(priv, 0x404010, 0x00000000); + nv_wr32(priv, 0x404014, 0x00000000); + nv_wr32(priv, 0x404018, 0x00000000); + nv_wr32(priv, 0x40401c, 0x00000000); + nv_wr32(priv, 0x404020, 0x00000000); + nv_wr32(priv, 0x404024, 0x00000000); + nv_wr32(priv, 0x404028, 0x00000000); + nv_wr32(priv, 0x40402c, 0x00000000); + nv_wr32(priv, 0x404044, 0x00000000); + nv_wr32(priv, 0x404094, 0x00000000); + nv_wr32(priv, 0x404098, 0x00000000); + nv_wr32(priv, 0x40409c, 0x00000000); + nv_wr32(priv, 0x4040a0, 0x00000000); + nv_wr32(priv, 0x4040a4, 0x00000000); + nv_wr32(priv, 0x4040a8, 0x00000000); + nv_wr32(priv, 0x4040ac, 0x00000000); + nv_wr32(priv, 0x4040b0, 0x00000000); + nv_wr32(priv, 0x4040b4, 0x00000000); + nv_wr32(priv, 0x4040b8, 0x00000000); + nv_wr32(priv, 0x4040bc, 0x00000000); + nv_wr32(priv, 0x4040c0, 0x00000000); + nv_wr32(priv, 0x4040c4, 0x00000000); + nv_wr32(priv, 0x4040c8, 0xf0000087); + nv_wr32(priv, 0x4040d4, 0x00000000); + nv_wr32(priv, 0x4040d8, 0x00000000); + nv_wr32(priv, 0x4040dc, 0x00000000); + nv_wr32(priv, 0x4040e0, 0x00000000); + nv_wr32(priv, 0x4040e4, 0x00000000); + nv_wr32(priv, 0x4040e8, 0x00001000); + nv_wr32(priv, 0x4040f8, 0x00000000); + nv_wr32(priv, 0x404130, 0x00000000); + nv_wr32(priv, 0x404134, 0x00000000); + nv_wr32(priv, 0x404138, 0x20000040); + nv_wr32(priv, 0x404150, 0x0000002e); + nv_wr32(priv, 0x404154, 0x00000400); + nv_wr32(priv, 0x404158, 0x00000200); + nv_wr32(priv, 0x404164, 0x00000055); + nv_wr32(priv, 0x404168, 0x00000000); + nv_wr32(priv, 0x404174, 0x00000000); + nv_wr32(priv, 0x404178, 0x00000000); + nv_wr32(priv, 0x40417c, 0x00000000); + for (i = 0; i < 8; i++) + nv_wr32(priv, 0x404200 + (i * 4), 0x00000000); /* subc */ +} + +static void +nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404404, 0x00000000); + nv_wr32(priv, 0x404408, 0x00000000); + nv_wr32(priv, 0x40440c, 0x00000000); + nv_wr32(priv, 0x404410, 0x00000000); + nv_wr32(priv, 0x404414, 0x00000000); + nv_wr32(priv, 0x404418, 0x00000000); + nv_wr32(priv, 0x40441c, 0x00000000); + nv_wr32(priv, 0x404420, 0x00000000); + nv_wr32(priv, 0x404424, 0x00000000); + nv_wr32(priv, 0x404428, 0x00000000); + nv_wr32(priv, 0x40442c, 0x00000000); + nv_wr32(priv, 0x404430, 0x00000000); + nv_wr32(priv, 0x404434, 0x00000000); + nv_wr32(priv, 0x404438, 0x00000000); + nv_wr32(priv, 0x404460, 0x00000000); + nv_wr32(priv, 0x404464, 0x00000000); + nv_wr32(priv, 0x404468, 0x00ffffff); + nv_wr32(priv, 0x40446c, 0x00000000); + nv_wr32(priv, 0x404480, 0x00000001); + nv_wr32(priv, 0x404498, 0x00000001); +} + +static void +nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404604, 
0x00000015); + nv_wr32(priv, 0x404608, 0x00000000); + nv_wr32(priv, 0x40460c, 0x00002e00); + nv_wr32(priv, 0x404610, 0x00000100); + nv_wr32(priv, 0x404618, 0x00000000); + nv_wr32(priv, 0x40461c, 0x00000000); + nv_wr32(priv, 0x404620, 0x00000000); + nv_wr32(priv, 0x404624, 0x00000000); + nv_wr32(priv, 0x404628, 0x00000000); + nv_wr32(priv, 0x40462c, 0x00000000); + nv_wr32(priv, 0x404630, 0x00000000); + nv_wr32(priv, 0x404634, 0x00000000); + nv_wr32(priv, 0x404638, 0x00000004); + nv_wr32(priv, 0x40463c, 0x00000000); + nv_wr32(priv, 0x404640, 0x00000000); + nv_wr32(priv, 0x404644, 0x00000000); + nv_wr32(priv, 0x404648, 0x00000000); + nv_wr32(priv, 0x40464c, 0x00000000); + nv_wr32(priv, 0x404650, 0x00000000); + nv_wr32(priv, 0x404654, 0x00000000); + nv_wr32(priv, 0x404658, 0x00000000); + nv_wr32(priv, 0x40465c, 0x007f0100); + nv_wr32(priv, 0x404660, 0x00000000); + nv_wr32(priv, 0x404664, 0x00000000); + nv_wr32(priv, 0x404668, 0x00000000); + nv_wr32(priv, 0x40466c, 0x00000000); + nv_wr32(priv, 0x404670, 0x00000000); + nv_wr32(priv, 0x404674, 0x00000000); + nv_wr32(priv, 0x404678, 0x00000000); + nv_wr32(priv, 0x40467c, 0x00000002); + nv_wr32(priv, 0x404680, 0x00000000); + nv_wr32(priv, 0x404684, 0x00000000); + nv_wr32(priv, 0x404688, 0x00000000); + nv_wr32(priv, 0x40468c, 0x00000000); + nv_wr32(priv, 0x404690, 0x00000000); + nv_wr32(priv, 0x404694, 0x00000000); + nv_wr32(priv, 0x404698, 0x00000000); + nv_wr32(priv, 0x40469c, 0x00000000); + nv_wr32(priv, 0x4046a0, 0x007f0080); + nv_wr32(priv, 0x4046a4, 0x00000000); + nv_wr32(priv, 0x4046a8, 0x00000000); + nv_wr32(priv, 0x4046ac, 0x00000000); + nv_wr32(priv, 0x4046b0, 0x00000000); + nv_wr32(priv, 0x4046b4, 0x00000000); + nv_wr32(priv, 0x4046b8, 0x00000000); + nv_wr32(priv, 0x4046bc, 0x00000000); + nv_wr32(priv, 0x4046c0, 0x00000000); + nv_wr32(priv, 0x4046c4, 0x00000000); + nv_wr32(priv, 0x4046c8, 0x00000000); + nv_wr32(priv, 0x4046cc, 0x00000000); + nv_wr32(priv, 0x4046d0, 0x00000000); + nv_wr32(priv, 0x4046d4, 0x00000000); + nv_wr32(priv, 0x4046d8, 0x00000000); + nv_wr32(priv, 0x4046dc, 0x00000000); + nv_wr32(priv, 0x4046e0, 0x00000000); + nv_wr32(priv, 0x4046e4, 0x00000000); + nv_wr32(priv, 0x4046e8, 0x00000000); + nv_wr32(priv, 0x4046f0, 0x00000000); + nv_wr32(priv, 0x4046f4, 0x00000000); +} + +static void +nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404700, 0x00000000); + nv_wr32(priv, 0x404704, 0x00000000); + nv_wr32(priv, 0x404708, 0x00000000); + nv_wr32(priv, 0x40470c, 0x00000000); + nv_wr32(priv, 0x404710, 0x00000000); + nv_wr32(priv, 0x404714, 0x00000000); + nv_wr32(priv, 0x404718, 0x00000000); + nv_wr32(priv, 0x40471c, 0x00000000); + nv_wr32(priv, 0x404720, 0x00000000); + nv_wr32(priv, 0x404724, 0x00000000); + nv_wr32(priv, 0x404728, 0x00000000); + nv_wr32(priv, 0x40472c, 0x00000000); + nv_wr32(priv, 0x404730, 0x00000000); + nv_wr32(priv, 0x404734, 0x00000100); + nv_wr32(priv, 0x404738, 0x00000000); + nv_wr32(priv, 0x40473c, 0x00000000); + nv_wr32(priv, 0x404740, 0x00000000); + nv_wr32(priv, 0x404744, 0x00000000); + nv_wr32(priv, 0x404748, 0x00000000); + nv_wr32(priv, 0x40474c, 0x00000000); + nv_wr32(priv, 0x404750, 0x00000000); + nv_wr32(priv, 0x404754, 0x00000000); +} + +static void +nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv) +{ + + if (nv_device(priv)->chipset >= 0xd0) { + nv_wr32(priv, 0x405800, 0x0f8000bf); + nv_wr32(priv, 0x405830, 0x02180218); + nv_wr32(priv, 0x405834, 0x08000000); + } else + if (nv_device(priv)->chipset == 0xc1) { + nv_wr32(priv, 0x405800, 0x0f8000bf); + 
nv_wr32(priv, 0x405830, 0x02180218); + nv_wr32(priv, 0x405834, 0x00000000); + } else { + nv_wr32(priv, 0x405800, 0x078000bf); + nv_wr32(priv, 0x405830, 0x02180000); + nv_wr32(priv, 0x405834, 0x00000000); + } + nv_wr32(priv, 0x405838, 0x00000000); + nv_wr32(priv, 0x405854, 0x00000000); + nv_wr32(priv, 0x405870, 0x00000001); + nv_wr32(priv, 0x405874, 0x00000001); + nv_wr32(priv, 0x405878, 0x00000001); + nv_wr32(priv, 0x40587c, 0x00000001); + nv_wr32(priv, 0x405a00, 0x00000000); + nv_wr32(priv, 0x405a04, 0x00000000); + nv_wr32(priv, 0x405a18, 0x00000000); +} + +static void +nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x406020, 0x000103c1); + nv_wr32(priv, 0x406028, 0x00000001); + nv_wr32(priv, 0x40602c, 0x00000001); + nv_wr32(priv, 0x406030, 0x00000001); + nv_wr32(priv, 0x406034, 0x00000001); +} + +static void +nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv) +{ + + nv_wr32(priv, 0x4064a8, 0x00000000); + nv_wr32(priv, 0x4064ac, 0x00003fff); + nv_wr32(priv, 0x4064b4, 0x00000000); + nv_wr32(priv, 0x4064b8, 0x00000000); + if (nv_device(priv)->chipset >= 0xd0) + nv_wr32(priv, 0x4064bc, 0x00000000); + if (nv_device(priv)->chipset == 0xc1 || + nv_device(priv)->chipset >= 0xd0) { + nv_wr32(priv, 0x4064c0, 0x80140078); + nv_wr32(priv, 0x4064c4, 0x0086ffff); + } +} + +static void +nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x407804, 0x00000023); + nv_wr32(priv, 0x40780c, 0x0a418820); + nv_wr32(priv, 0x407810, 0x062080e6); + nv_wr32(priv, 0x407814, 0x020398a4); + nv_wr32(priv, 0x407818, 0x0e629062); + nv_wr32(priv, 0x40781c, 0x0a418820); + nv_wr32(priv, 0x407820, 0x000000e6); + nv_wr32(priv, 0x4078bc, 0x00000103); +} + +static void +nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x408000, 0x00000000); + nv_wr32(priv, 0x408004, 0x00000000); + nv_wr32(priv, 0x408008, 0x00000018); + nv_wr32(priv, 0x40800c, 0x00000000); + nv_wr32(priv, 0x408010, 0x00000000); + nv_wr32(priv, 0x408014, 0x00000069); + nv_wr32(priv, 0x408018, 0xe100e100); + nv_wr32(priv, 0x408064, 0x00000000); +} + +static void +nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv) +{ + int chipset = nv_device(priv)->chipset; + + /* ROPC_BROADCAST */ + nv_wr32(priv, 0x408800, 0x02802a3c); + nv_wr32(priv, 0x408804, 0x00000040); + if (chipset >= 0xd0) { + nv_wr32(priv, 0x408808, 0x1043e005); + nv_wr32(priv, 0x408900, 0x3080b801); + nv_wr32(priv, 0x408904, 0x1043e005); + nv_wr32(priv, 0x408908, 0x00c8102f); + } else + if (chipset == 0xc1) { + nv_wr32(priv, 0x408808, 0x1003e005); + nv_wr32(priv, 0x408900, 0x3080b801); + nv_wr32(priv, 0x408904, 0x62000001); + nv_wr32(priv, 0x408908, 0x00c80929); + } else { + nv_wr32(priv, 0x408808, 0x0003e00d); + nv_wr32(priv, 0x408900, 0x3080b801); + nv_wr32(priv, 0x408904, 0x02000001); + nv_wr32(priv, 0x408908, 0x00c80929); + } + nv_wr32(priv, 0x40890c, 0x00000000); + nv_wr32(priv, 0x408980, 0x0000011d); +} + +static void +nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv) +{ + int chipset = nv_device(priv)->chipset; + int i; + + /* GPC_BROADCAST */ + nv_wr32(priv, 0x418380, 0x00000016); + nv_wr32(priv, 0x418400, 0x38004e00); + nv_wr32(priv, 0x418404, 0x71e0ffff); + nv_wr32(priv, 0x418408, 0x00000000); + nv_wr32(priv, 0x41840c, 0x00001008); + nv_wr32(priv, 0x418410, 0x0fff0fff); + nv_wr32(priv, 0x418414, chipset < 0xd0 ? 
0x00200fff : 0x02200fff); + nv_wr32(priv, 0x418450, 0x00000000); + nv_wr32(priv, 0x418454, 0x00000000); + nv_wr32(priv, 0x418458, 0x00000000); + nv_wr32(priv, 0x41845c, 0x00000000); + nv_wr32(priv, 0x418460, 0x00000000); + nv_wr32(priv, 0x418464, 0x00000000); + nv_wr32(priv, 0x418468, 0x00000001); + nv_wr32(priv, 0x41846c, 0x00000000); + nv_wr32(priv, 0x418470, 0x00000000); + nv_wr32(priv, 0x418600, 0x0000001f); + nv_wr32(priv, 0x418684, 0x0000000f); + nv_wr32(priv, 0x418700, 0x00000002); + nv_wr32(priv, 0x418704, 0x00000080); + nv_wr32(priv, 0x418708, 0x00000000); + nv_wr32(priv, 0x41870c, chipset < 0xd0 ? 0x07c80000 : 0x00000000); + nv_wr32(priv, 0x418710, 0x00000000); + nv_wr32(priv, 0x418800, chipset < 0xd0 ? 0x0006860a : 0x7006860a); + nv_wr32(priv, 0x418808, 0x00000000); + nv_wr32(priv, 0x41880c, 0x00000000); + nv_wr32(priv, 0x418810, 0x00000000); + nv_wr32(priv, 0x418828, 0x00008442); + if (chipset == 0xc1 || chipset >= 0xd0) + nv_wr32(priv, 0x418830, 0x10000001); + else + nv_wr32(priv, 0x418830, 0x00000001); + nv_wr32(priv, 0x4188d8, 0x00000008); + nv_wr32(priv, 0x4188e0, 0x01000000); + nv_wr32(priv, 0x4188e8, 0x00000000); + nv_wr32(priv, 0x4188ec, 0x00000000); + nv_wr32(priv, 0x4188f0, 0x00000000); + nv_wr32(priv, 0x4188f4, 0x00000000); + nv_wr32(priv, 0x4188f8, 0x00000000); + if (chipset >= 0xd0) + nv_wr32(priv, 0x4188fc, 0x20100008); + else if (chipset == 0xc1) + nv_wr32(priv, 0x4188fc, 0x00100018); + else + nv_wr32(priv, 0x4188fc, 0x00100000); + nv_wr32(priv, 0x41891c, 0x00ff00ff); + nv_wr32(priv, 0x418924, 0x00000000); + nv_wr32(priv, 0x418928, 0x00ffff00); + nv_wr32(priv, 0x41892c, 0x0000ff00); + for (i = 0; i < 8; i++) { + nv_wr32(priv, 0x418a00 + (i * 0x20), 0x00000000); + nv_wr32(priv, 0x418a04 + (i * 0x20), 0x00000000); + nv_wr32(priv, 0x418a08 + (i * 0x20), 0x00000000); + nv_wr32(priv, 0x418a0c + (i * 0x20), 0x00010000); + nv_wr32(priv, 0x418a10 + (i * 0x20), 0x00000000); + nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000); + nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000); + } + nv_wr32(priv, 0x418b00, chipset < 0xd0 ? 
0x00000000 : 0x00000006); + nv_wr32(priv, 0x418b08, 0x0a418820); + nv_wr32(priv, 0x418b0c, 0x062080e6); + nv_wr32(priv, 0x418b10, 0x020398a4); + nv_wr32(priv, 0x418b14, 0x0e629062); + nv_wr32(priv, 0x418b18, 0x0a418820); + nv_wr32(priv, 0x418b1c, 0x000000e6); + nv_wr32(priv, 0x418bb8, 0x00000103); + nv_wr32(priv, 0x418c08, 0x00000001); + nv_wr32(priv, 0x418c10, 0x00000000); + nv_wr32(priv, 0x418c14, 0x00000000); + nv_wr32(priv, 0x418c18, 0x00000000); + nv_wr32(priv, 0x418c1c, 0x00000000); + nv_wr32(priv, 0x418c20, 0x00000000); + nv_wr32(priv, 0x418c24, 0x00000000); + nv_wr32(priv, 0x418c28, 0x00000000); + nv_wr32(priv, 0x418c2c, 0x00000000); + if (chipset == 0xc1 || chipset >= 0xd0) + nv_wr32(priv, 0x418c6c, 0x00000001); + nv_wr32(priv, 0x418c80, 0x20200004); + nv_wr32(priv, 0x418c8c, 0x00000001); + nv_wr32(priv, 0x419000, 0x00000780); + nv_wr32(priv, 0x419004, 0x00000000); + nv_wr32(priv, 0x419008, 0x00000000); + nv_wr32(priv, 0x419014, 0x00000004); +} + +static void +nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv) +{ + int chipset = nv_device(priv)->chipset; + + /* GPC_BROADCAST.TP_BROADCAST */ + nv_wr32(priv, 0x419818, 0x00000000); + nv_wr32(priv, 0x41983c, 0x00038bc7); + nv_wr32(priv, 0x419848, 0x00000000); + if (chipset == 0xc1 || chipset >= 0xd0) + nv_wr32(priv, 0x419864, 0x00000129); + else + nv_wr32(priv, 0x419864, 0x0000012a); + nv_wr32(priv, 0x419888, 0x00000000); + nv_wr32(priv, 0x419a00, 0x000001f0); + nv_wr32(priv, 0x419a04, 0x00000001); + nv_wr32(priv, 0x419a08, 0x00000023); + nv_wr32(priv, 0x419a0c, 0x00020000); + nv_wr32(priv, 0x419a10, 0x00000000); + nv_wr32(priv, 0x419a14, 0x00000200); + nv_wr32(priv, 0x419a1c, 0x00000000); + nv_wr32(priv, 0x419a20, 0x00000800); + if (chipset >= 0xd0) + nv_wr32(priv, 0x00419ac4, 0x0017f440); + else if (chipset != 0xc0 && chipset != 0xc8) + nv_wr32(priv, 0x00419ac4, 0x0007f440); + nv_wr32(priv, 0x419b00, 0x0a418820); + nv_wr32(priv, 0x419b04, 0x062080e6); + nv_wr32(priv, 0x419b08, 0x020398a4); + nv_wr32(priv, 0x419b0c, 0x0e629062); + nv_wr32(priv, 0x419b10, 0x0a418820); + nv_wr32(priv, 0x419b14, 0x000000e6); + nv_wr32(priv, 0x419bd0, 0x00900103); + if (chipset == 0xc1 || chipset >= 0xd0) + nv_wr32(priv, 0x419be0, 0x00400001); + else + nv_wr32(priv, 0x419be0, 0x00000001); + nv_wr32(priv, 0x419be4, 0x00000000); + nv_wr32(priv, 0x419c00, chipset < 0xd0 ? 
0x00000002 : 0x0000000a); + nv_wr32(priv, 0x419c04, 0x00000006); + nv_wr32(priv, 0x419c08, 0x00000002); + nv_wr32(priv, 0x419c20, 0x00000000); + if (nv_device(priv)->chipset >= 0xd0) { + nv_wr32(priv, 0x419c24, 0x00084210); + nv_wr32(priv, 0x419c28, 0x3cf3cf3c); + nv_wr32(priv, 0x419cb0, 0x00020048); + } else + if (chipset == 0xce || chipset == 0xcf) { + nv_wr32(priv, 0x419cb0, 0x00020048); + } else { + nv_wr32(priv, 0x419cb0, 0x00060048); + } + nv_wr32(priv, 0x419ce8, 0x00000000); + nv_wr32(priv, 0x419cf4, 0x00000183); + if (chipset == 0xc1 || chipset >= 0xd0) + nv_wr32(priv, 0x419d20, 0x12180000); + else + nv_wr32(priv, 0x419d20, 0x02180000); + nv_wr32(priv, 0x419d24, 0x00001fff); + if (chipset == 0xc1 || chipset >= 0xd0) + nv_wr32(priv, 0x419d44, 0x02180218); + nv_wr32(priv, 0x419e04, 0x00000000); + nv_wr32(priv, 0x419e08, 0x00000000); + nv_wr32(priv, 0x419e0c, 0x00000000); + nv_wr32(priv, 0x419e10, 0x00000002); + nv_wr32(priv, 0x419e44, 0x001beff2); + nv_wr32(priv, 0x419e48, 0x00000000); + nv_wr32(priv, 0x419e4c, 0x0000000f); + nv_wr32(priv, 0x419e50, 0x00000000); + nv_wr32(priv, 0x419e54, 0x00000000); + nv_wr32(priv, 0x419e58, 0x00000000); + nv_wr32(priv, 0x419e5c, 0x00000000); + nv_wr32(priv, 0x419e60, 0x00000000); + nv_wr32(priv, 0x419e64, 0x00000000); + nv_wr32(priv, 0x419e68, 0x00000000); + nv_wr32(priv, 0x419e6c, 0x00000000); + nv_wr32(priv, 0x419e70, 0x00000000); + nv_wr32(priv, 0x419e74, 0x00000000); + nv_wr32(priv, 0x419e78, 0x00000000); + nv_wr32(priv, 0x419e7c, 0x00000000); + nv_wr32(priv, 0x419e80, 0x00000000); + nv_wr32(priv, 0x419e84, 0x00000000); + nv_wr32(priv, 0x419e88, 0x00000000); + nv_wr32(priv, 0x419e8c, 0x00000000); + nv_wr32(priv, 0x419e90, 0x00000000); + nv_wr32(priv, 0x419e98, 0x00000000); + if (chipset != 0xc0 && chipset != 0xc8) + nv_wr32(priv, 0x419ee0, 0x00011110); + nv_wr32(priv, 0x419f50, 0x00000000); + nv_wr32(priv, 0x419f54, 0x00000000); + if (chipset != 0xc0 && chipset != 0xc8) + nv_wr32(priv, 0x419f58, 0x00000000); +} + +int +nvc0_grctx_generate(struct nvc0_graph_priv *priv) +{ + struct nvc0_grctx info; + int ret, i, gpc, tpc, id; + u32 fermi = nvc0_graph_class(priv); + u32 r000260, tmp; + + ret = nvc0_grctx_init(priv, &info); + if (ret) + return ret; + + r000260 = nv_rd32(priv, 0x000260); + nv_wr32(priv, 0x000260, r000260 & ~1); + nv_wr32(priv, 0x400208, 0x00000000); + + nvc0_grctx_generate_dispatch(priv); + nvc0_grctx_generate_macro(priv); + nvc0_grctx_generate_m2mf(priv); + nvc0_grctx_generate_unk47xx(priv); + nvc0_grctx_generate_shaders(priv); + nvc0_grctx_generate_unk60xx(priv); + nvc0_grctx_generate_unk64xx(priv); + nvc0_grctx_generate_tpbus(priv); + nvc0_grctx_generate_ccache(priv); + nvc0_grctx_generate_rop(priv); + nvc0_grctx_generate_gpc(priv); + nvc0_grctx_generate_tp(priv); + + nv_wr32(priv, 0x404154, 0x00000000); + + /* generate per-context mmio list data */ + mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); + mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); + mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); + mmio_list(0x408004, 0x00000000, 8, 0); + mmio_list(0x408008, 0x80000018, 0, 0); + mmio_list(0x40800c, 0x00000000, 8, 1); + mmio_list(0x408010, 0x80000000, 0, 0); + mmio_list(0x418810, 0x80000000, 12, 2); + mmio_list(0x419848, 0x10000000, 12, 2); + mmio_list(0x419004, 0x00000000, 8, 1); + mmio_list(0x419008, 0x00000000, 0, 0); + mmio_list(0x418808, 0x00000000, 8, 0); + mmio_list(0x41880c, 0x80000018, 0, 0); + if (nv_device(priv)->chipset != 0xc1) { + tmp = 0x02180000; + mmio_list(0x405830, tmp, 0, 
0); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { + u32 reg = TPC_UNIT(gpc, tpc, 0x0520); + mmio_list(reg, tmp, 0, 0); + tmp += 0x0324; + } + } + } else { + tmp = 0x02180000; + mmio_list(0x405830, 0x00000218 | tmp, 0, 0); + mmio_list(0x4064c4, 0x0086ffff, 0, 0); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { + u32 reg = TPC_UNIT(gpc, tpc, 0x0520); + mmio_list(reg, 0x10000000 | tmp, 0, 0); + tmp += 0x0324; + } + for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { + u32 reg = TPC_UNIT(gpc, tpc, 0x0544); + mmio_list(reg, tmp, 0, 0); + tmp += 0x0324; + } + } + } + + for (tpc = 0, id = 0; tpc < 4; tpc++) { + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + if (tpc < priv->tpc_nr[gpc]) { + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id); + nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id); + id++; + } + + nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]); + nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]); + } + } + + tmp = 0; + for (i = 0; i < priv->gpc_nr; i++) + tmp |= priv->tpc_nr[i] << (i * 4); + nv_wr32(priv, 0x406028, tmp); + nv_wr32(priv, 0x405870, tmp); + + nv_wr32(priv, 0x40602c, 0x00000000); + nv_wr32(priv, 0x405874, 0x00000000); + nv_wr32(priv, 0x406030, 0x00000000); + nv_wr32(priv, 0x405878, 0x00000000); + nv_wr32(priv, 0x406034, 0x00000000); + nv_wr32(priv, 0x40587c, 0x00000000); + + if (1) { + u8 tpcnr[GPC_MAX], data[TPC_MAX]; + + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + memset(data, 0x1f, sizeof(data)); + + gpc = -1; + for (tpc = 0; tpc < priv->tpc_total; tpc++) { + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpcnr[gpc]--; + data[tpc] = gpc; + } + + for (i = 0; i < 4; i++) + nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]); + } + + if (1) { + u32 data[6] = {}, data2[2] = {}; + u8 tpcnr[GPC_MAX]; + u8 shift, ntpcv; + + /* calculate first set of magics */ + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + + gpc = -1; + for (tpc = 0; tpc < priv->tpc_total; tpc++) { + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpcnr[gpc]--; + + data[tpc / 6] |= gpc << ((tpc % 6) * 5); + } + + for (; tpc < 32; tpc++) + data[tpc / 6] |= 7 << ((tpc % 6) * 5); + + /* and the second... 
*/ + shift = 0; + ntpcv = priv->tpc_total; + while (!(ntpcv & (1 << 4))) { + ntpcv <<= 1; + shift++; + } + + data2[0] = (ntpcv << 16); + data2[0] |= (shift << 21); + data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24); + for (i = 1; i < 7; i++) + data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); + + /* GPC_BROADCAST */ + nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | + priv->magic_not_rop_nr); + for (i = 0; i < 6; i++) + nv_wr32(priv, 0x418b08 + (i * 4), data[i]); + + /* GPC_BROADCAST.TP_BROADCAST */ + nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) | + priv->magic_not_rop_nr | + data2[0]); + nv_wr32(priv, 0x419be4, data2[1]); + for (i = 0; i < 6; i++) + nv_wr32(priv, 0x419b00 + (i * 4), data[i]); + + /* UNK78xx */ + nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | + priv->magic_not_rop_nr); + for (i = 0; i < 6; i++) + nv_wr32(priv, 0x40780c + (i * 4), data[i]); + } + + if (1) { + u32 tpc_mask = 0, tpc_set = 0; + u8 tpcnr[GPC_MAX], a, b; + + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) + tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8); + + for (i = 0, gpc = -1, b = -1; i < 32; i++) { + a = (i * (priv->tpc_total - 1)) / 32; + if (a != b) { + b = a; + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; + + tpc_set |= 1 << ((gpc * 8) + tpc); + } + + nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set); + nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask); + } + } + + nv_wr32(priv, 0x400208, 0x80000000); + + nv_icmd(priv, 0x00001000, 0x00000004); + nv_icmd(priv, 0x000000a9, 0x0000ffff); + nv_icmd(priv, 0x00000038, 0x0fac6881); + nv_icmd(priv, 0x0000003d, 0x00000001); + nv_icmd(priv, 0x000000e8, 0x00000400); + nv_icmd(priv, 0x000000e9, 0x00000400); + nv_icmd(priv, 0x000000ea, 0x00000400); + nv_icmd(priv, 0x000000eb, 0x00000400); + nv_icmd(priv, 0x000000ec, 0x00000400); + nv_icmd(priv, 0x000000ed, 0x00000400); + nv_icmd(priv, 0x000000ee, 0x00000400); + nv_icmd(priv, 0x000000ef, 0x00000400); + nv_icmd(priv, 0x00000078, 0x00000300); + nv_icmd(priv, 0x00000079, 0x00000300); + nv_icmd(priv, 0x0000007a, 0x00000300); + nv_icmd(priv, 0x0000007b, 0x00000300); + nv_icmd(priv, 0x0000007c, 0x00000300); + nv_icmd(priv, 0x0000007d, 0x00000300); + nv_icmd(priv, 0x0000007e, 0x00000300); + nv_icmd(priv, 0x0000007f, 0x00000300); + nv_icmd(priv, 0x00000050, 0x00000011); + nv_icmd(priv, 0x00000058, 0x00000008); + nv_icmd(priv, 0x00000059, 0x00000008); + nv_icmd(priv, 0x0000005a, 0x00000008); + nv_icmd(priv, 0x0000005b, 0x00000008); + nv_icmd(priv, 0x0000005c, 0x00000008); + nv_icmd(priv, 0x0000005d, 0x00000008); + nv_icmd(priv, 0x0000005e, 0x00000008); + nv_icmd(priv, 0x0000005f, 0x00000008); + nv_icmd(priv, 0x00000208, 0x00000001); + nv_icmd(priv, 0x00000209, 0x00000001); + nv_icmd(priv, 0x0000020a, 0x00000001); + nv_icmd(priv, 0x0000020b, 0x00000001); + nv_icmd(priv, 0x0000020c, 0x00000001); + nv_icmd(priv, 0x0000020d, 0x00000001); + nv_icmd(priv, 0x0000020e, 0x00000001); + nv_icmd(priv, 0x0000020f, 0x00000001); + nv_icmd(priv, 0x00000081, 0x00000001); + nv_icmd(priv, 0x00000085, 0x00000004); + nv_icmd(priv, 0x00000088, 0x00000400); + nv_icmd(priv, 0x00000090, 0x00000300); + nv_icmd(priv, 0x00000098, 0x00001001); + nv_icmd(priv, 0x000000e3, 0x00000001); + nv_icmd(priv, 0x000000da, 0x00000001); + nv_icmd(priv, 0x000000f8, 0x00000003); + nv_icmd(priv, 0x000000fa, 0x00000001); + nv_icmd(priv, 0x0000009f, 0x0000ffff); + nv_icmd(priv, 0x000000a0, 0x0000ffff); + nv_icmd(priv, 0x000000a1, 0x0000ffff); + 
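+ /* The long run of nv_icmd() writes through the remainder of this
+  * function appears to seed initial method state for the graphics
+  * classes via PGRAPH's indirect-command (ICMD) path; it is bracketed
+  * by the 0x400208 writes (0x80000000 above to enable it, 0x00000000
+  * below once the table has been uploaded).
+  */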
nv_icmd(priv, 0x000000a2, 0x0000ffff); + nv_icmd(priv, 0x000000b1, 0x00000001); + nv_icmd(priv, 0x000000b2, 0x00000000); + nv_icmd(priv, 0x000000b3, 0x00000000); + nv_icmd(priv, 0x000000b4, 0x00000000); + nv_icmd(priv, 0x000000b5, 0x00000000); + nv_icmd(priv, 0x000000b6, 0x00000000); + nv_icmd(priv, 0x000000b7, 0x00000000); + nv_icmd(priv, 0x000000b8, 0x00000000); + nv_icmd(priv, 0x000000b9, 0x00000000); + nv_icmd(priv, 0x000000ba, 0x00000000); + nv_icmd(priv, 0x000000bb, 0x00000000); + nv_icmd(priv, 0x000000bc, 0x00000000); + nv_icmd(priv, 0x000000bd, 0x00000000); + nv_icmd(priv, 0x000000be, 0x00000000); + nv_icmd(priv, 0x000000bf, 0x00000000); + nv_icmd(priv, 0x000000c0, 0x00000000); + nv_icmd(priv, 0x000000c1, 0x00000000); + nv_icmd(priv, 0x000000c2, 0x00000000); + nv_icmd(priv, 0x000000c3, 0x00000000); + nv_icmd(priv, 0x000000c4, 0x00000000); + nv_icmd(priv, 0x000000c5, 0x00000000); + nv_icmd(priv, 0x000000c6, 0x00000000); + nv_icmd(priv, 0x000000c7, 0x00000000); + nv_icmd(priv, 0x000000c8, 0x00000000); + nv_icmd(priv, 0x000000c9, 0x00000000); + nv_icmd(priv, 0x000000ca, 0x00000000); + nv_icmd(priv, 0x000000cb, 0x00000000); + nv_icmd(priv, 0x000000cc, 0x00000000); + nv_icmd(priv, 0x000000cd, 0x00000000); + nv_icmd(priv, 0x000000ce, 0x00000000); + nv_icmd(priv, 0x000000cf, 0x00000000); + nv_icmd(priv, 0x000000d0, 0x00000000); + nv_icmd(priv, 0x000000d1, 0x00000000); + nv_icmd(priv, 0x000000d2, 0x00000000); + nv_icmd(priv, 0x000000d3, 0x00000000); + nv_icmd(priv, 0x000000d4, 0x00000000); + nv_icmd(priv, 0x000000d5, 0x00000000); + nv_icmd(priv, 0x000000d6, 0x00000000); + nv_icmd(priv, 0x000000d7, 0x00000000); + nv_icmd(priv, 0x000000d8, 0x00000000); + nv_icmd(priv, 0x000000d9, 0x00000000); + nv_icmd(priv, 0x00000210, 0x00000040); + nv_icmd(priv, 0x00000211, 0x00000040); + nv_icmd(priv, 0x00000212, 0x00000040); + nv_icmd(priv, 0x00000213, 0x00000040); + nv_icmd(priv, 0x00000214, 0x00000040); + nv_icmd(priv, 0x00000215, 0x00000040); + nv_icmd(priv, 0x00000216, 0x00000040); + nv_icmd(priv, 0x00000217, 0x00000040); + if (nv_device(priv)->chipset >= 0xd0) { + for (i = 0x0400; i <= 0x0417; i++) + nv_icmd(priv, i, 0x00000040); + } + nv_icmd(priv, 0x00000218, 0x0000c080); + nv_icmd(priv, 0x00000219, 0x0000c080); + nv_icmd(priv, 0x0000021a, 0x0000c080); + nv_icmd(priv, 0x0000021b, 0x0000c080); + nv_icmd(priv, 0x0000021c, 0x0000c080); + nv_icmd(priv, 0x0000021d, 0x0000c080); + nv_icmd(priv, 0x0000021e, 0x0000c080); + nv_icmd(priv, 0x0000021f, 0x0000c080); + if (nv_device(priv)->chipset >= 0xd0) { + for (i = 0x0440; i <= 0x0457; i++) + nv_icmd(priv, i, 0x0000c080); + } + nv_icmd(priv, 0x000000ad, 0x0000013e); + nv_icmd(priv, 0x000000e1, 0x00000010); + nv_icmd(priv, 0x00000290, 0x00000000); + nv_icmd(priv, 0x00000291, 0x00000000); + nv_icmd(priv, 0x00000292, 0x00000000); + nv_icmd(priv, 0x00000293, 0x00000000); + nv_icmd(priv, 0x00000294, 0x00000000); + nv_icmd(priv, 0x00000295, 0x00000000); + nv_icmd(priv, 0x00000296, 0x00000000); + nv_icmd(priv, 0x00000297, 0x00000000); + nv_icmd(priv, 0x00000298, 0x00000000); + nv_icmd(priv, 0x00000299, 0x00000000); + nv_icmd(priv, 0x0000029a, 0x00000000); + nv_icmd(priv, 0x0000029b, 0x00000000); + nv_icmd(priv, 0x0000029c, 0x00000000); + nv_icmd(priv, 0x0000029d, 0x00000000); + nv_icmd(priv, 0x0000029e, 0x00000000); + nv_icmd(priv, 0x0000029f, 0x00000000); + nv_icmd(priv, 0x000003b0, 0x00000000); + nv_icmd(priv, 0x000003b1, 0x00000000); + nv_icmd(priv, 0x000003b2, 0x00000000); + nv_icmd(priv, 0x000003b3, 0x00000000); + nv_icmd(priv, 0x000003b4, 0x00000000); + 
nv_icmd(priv, 0x000003b5, 0x00000000); + nv_icmd(priv, 0x000003b6, 0x00000000); + nv_icmd(priv, 0x000003b7, 0x00000000); + nv_icmd(priv, 0x000003b8, 0x00000000); + nv_icmd(priv, 0x000003b9, 0x00000000); + nv_icmd(priv, 0x000003ba, 0x00000000); + nv_icmd(priv, 0x000003bb, 0x00000000); + nv_icmd(priv, 0x000003bc, 0x00000000); + nv_icmd(priv, 0x000003bd, 0x00000000); + nv_icmd(priv, 0x000003be, 0x00000000); + nv_icmd(priv, 0x000003bf, 0x00000000); + nv_icmd(priv, 0x000002a0, 0x00000000); + nv_icmd(priv, 0x000002a1, 0x00000000); + nv_icmd(priv, 0x000002a2, 0x00000000); + nv_icmd(priv, 0x000002a3, 0x00000000); + nv_icmd(priv, 0x000002a4, 0x00000000); + nv_icmd(priv, 0x000002a5, 0x00000000); + nv_icmd(priv, 0x000002a6, 0x00000000); + nv_icmd(priv, 0x000002a7, 0x00000000); + nv_icmd(priv, 0x000002a8, 0x00000000); + nv_icmd(priv, 0x000002a9, 0x00000000); + nv_icmd(priv, 0x000002aa, 0x00000000); + nv_icmd(priv, 0x000002ab, 0x00000000); + nv_icmd(priv, 0x000002ac, 0x00000000); + nv_icmd(priv, 0x000002ad, 0x00000000); + nv_icmd(priv, 0x000002ae, 0x00000000); + nv_icmd(priv, 0x000002af, 0x00000000); + nv_icmd(priv, 0x00000420, 0x00000000); + nv_icmd(priv, 0x00000421, 0x00000000); + nv_icmd(priv, 0x00000422, 0x00000000); + nv_icmd(priv, 0x00000423, 0x00000000); + nv_icmd(priv, 0x00000424, 0x00000000); + nv_icmd(priv, 0x00000425, 0x00000000); + nv_icmd(priv, 0x00000426, 0x00000000); + nv_icmd(priv, 0x00000427, 0x00000000); + nv_icmd(priv, 0x00000428, 0x00000000); + nv_icmd(priv, 0x00000429, 0x00000000); + nv_icmd(priv, 0x0000042a, 0x00000000); + nv_icmd(priv, 0x0000042b, 0x00000000); + nv_icmd(priv, 0x0000042c, 0x00000000); + nv_icmd(priv, 0x0000042d, 0x00000000); + nv_icmd(priv, 0x0000042e, 0x00000000); + nv_icmd(priv, 0x0000042f, 0x00000000); + nv_icmd(priv, 0x000002b0, 0x00000000); + nv_icmd(priv, 0x000002b1, 0x00000000); + nv_icmd(priv, 0x000002b2, 0x00000000); + nv_icmd(priv, 0x000002b3, 0x00000000); + nv_icmd(priv, 0x000002b4, 0x00000000); + nv_icmd(priv, 0x000002b5, 0x00000000); + nv_icmd(priv, 0x000002b6, 0x00000000); + nv_icmd(priv, 0x000002b7, 0x00000000); + nv_icmd(priv, 0x000002b8, 0x00000000); + nv_icmd(priv, 0x000002b9, 0x00000000); + nv_icmd(priv, 0x000002ba, 0x00000000); + nv_icmd(priv, 0x000002bb, 0x00000000); + nv_icmd(priv, 0x000002bc, 0x00000000); + nv_icmd(priv, 0x000002bd, 0x00000000); + nv_icmd(priv, 0x000002be, 0x00000000); + nv_icmd(priv, 0x000002bf, 0x00000000); + nv_icmd(priv, 0x00000430, 0x00000000); + nv_icmd(priv, 0x00000431, 0x00000000); + nv_icmd(priv, 0x00000432, 0x00000000); + nv_icmd(priv, 0x00000433, 0x00000000); + nv_icmd(priv, 0x00000434, 0x00000000); + nv_icmd(priv, 0x00000435, 0x00000000); + nv_icmd(priv, 0x00000436, 0x00000000); + nv_icmd(priv, 0x00000437, 0x00000000); + nv_icmd(priv, 0x00000438, 0x00000000); + nv_icmd(priv, 0x00000439, 0x00000000); + nv_icmd(priv, 0x0000043a, 0x00000000); + nv_icmd(priv, 0x0000043b, 0x00000000); + nv_icmd(priv, 0x0000043c, 0x00000000); + nv_icmd(priv, 0x0000043d, 0x00000000); + nv_icmd(priv, 0x0000043e, 0x00000000); + nv_icmd(priv, 0x0000043f, 0x00000000); + nv_icmd(priv, 0x000002c0, 0x00000000); + nv_icmd(priv, 0x000002c1, 0x00000000); + nv_icmd(priv, 0x000002c2, 0x00000000); + nv_icmd(priv, 0x000002c3, 0x00000000); + nv_icmd(priv, 0x000002c4, 0x00000000); + nv_icmd(priv, 0x000002c5, 0x00000000); + nv_icmd(priv, 0x000002c6, 0x00000000); + nv_icmd(priv, 0x000002c7, 0x00000000); + nv_icmd(priv, 0x000002c8, 0x00000000); + nv_icmd(priv, 0x000002c9, 0x00000000); + nv_icmd(priv, 0x000002ca, 0x00000000); + nv_icmd(priv, 0x000002cb, 
0x00000000); + nv_icmd(priv, 0x000002cc, 0x00000000); + nv_icmd(priv, 0x000002cd, 0x00000000); + nv_icmd(priv, 0x000002ce, 0x00000000); + nv_icmd(priv, 0x000002cf, 0x00000000); + nv_icmd(priv, 0x000004d0, 0x00000000); + nv_icmd(priv, 0x000004d1, 0x00000000); + nv_icmd(priv, 0x000004d2, 0x00000000); + nv_icmd(priv, 0x000004d3, 0x00000000); + nv_icmd(priv, 0x000004d4, 0x00000000); + nv_icmd(priv, 0x000004d5, 0x00000000); + nv_icmd(priv, 0x000004d6, 0x00000000); + nv_icmd(priv, 0x000004d7, 0x00000000); + nv_icmd(priv, 0x000004d8, 0x00000000); + nv_icmd(priv, 0x000004d9, 0x00000000); + nv_icmd(priv, 0x000004da, 0x00000000); + nv_icmd(priv, 0x000004db, 0x00000000); + nv_icmd(priv, 0x000004dc, 0x00000000); + nv_icmd(priv, 0x000004dd, 0x00000000); + nv_icmd(priv, 0x000004de, 0x00000000); + nv_icmd(priv, 0x000004df, 0x00000000); + nv_icmd(priv, 0x00000720, 0x00000000); + nv_icmd(priv, 0x00000721, 0x00000000); + nv_icmd(priv, 0x00000722, 0x00000000); + nv_icmd(priv, 0x00000723, 0x00000000); + nv_icmd(priv, 0x00000724, 0x00000000); + nv_icmd(priv, 0x00000725, 0x00000000); + nv_icmd(priv, 0x00000726, 0x00000000); + nv_icmd(priv, 0x00000727, 0x00000000); + nv_icmd(priv, 0x00000728, 0x00000000); + nv_icmd(priv, 0x00000729, 0x00000000); + nv_icmd(priv, 0x0000072a, 0x00000000); + nv_icmd(priv, 0x0000072b, 0x00000000); + nv_icmd(priv, 0x0000072c, 0x00000000); + nv_icmd(priv, 0x0000072d, 0x00000000); + nv_icmd(priv, 0x0000072e, 0x00000000); + nv_icmd(priv, 0x0000072f, 0x00000000); + nv_icmd(priv, 0x000008c0, 0x00000000); + nv_icmd(priv, 0x000008c1, 0x00000000); + nv_icmd(priv, 0x000008c2, 0x00000000); + nv_icmd(priv, 0x000008c3, 0x00000000); + nv_icmd(priv, 0x000008c4, 0x00000000); + nv_icmd(priv, 0x000008c5, 0x00000000); + nv_icmd(priv, 0x000008c6, 0x00000000); + nv_icmd(priv, 0x000008c7, 0x00000000); + nv_icmd(priv, 0x000008c8, 0x00000000); + nv_icmd(priv, 0x000008c9, 0x00000000); + nv_icmd(priv, 0x000008ca, 0x00000000); + nv_icmd(priv, 0x000008cb, 0x00000000); + nv_icmd(priv, 0x000008cc, 0x00000000); + nv_icmd(priv, 0x000008cd, 0x00000000); + nv_icmd(priv, 0x000008ce, 0x00000000); + nv_icmd(priv, 0x000008cf, 0x00000000); + nv_icmd(priv, 0x00000890, 0x00000000); + nv_icmd(priv, 0x00000891, 0x00000000); + nv_icmd(priv, 0x00000892, 0x00000000); + nv_icmd(priv, 0x00000893, 0x00000000); + nv_icmd(priv, 0x00000894, 0x00000000); + nv_icmd(priv, 0x00000895, 0x00000000); + nv_icmd(priv, 0x00000896, 0x00000000); + nv_icmd(priv, 0x00000897, 0x00000000); + nv_icmd(priv, 0x00000898, 0x00000000); + nv_icmd(priv, 0x00000899, 0x00000000); + nv_icmd(priv, 0x0000089a, 0x00000000); + nv_icmd(priv, 0x0000089b, 0x00000000); + nv_icmd(priv, 0x0000089c, 0x00000000); + nv_icmd(priv, 0x0000089d, 0x00000000); + nv_icmd(priv, 0x0000089e, 0x00000000); + nv_icmd(priv, 0x0000089f, 0x00000000); + nv_icmd(priv, 0x000008e0, 0x00000000); + nv_icmd(priv, 0x000008e1, 0x00000000); + nv_icmd(priv, 0x000008e2, 0x00000000); + nv_icmd(priv, 0x000008e3, 0x00000000); + nv_icmd(priv, 0x000008e4, 0x00000000); + nv_icmd(priv, 0x000008e5, 0x00000000); + nv_icmd(priv, 0x000008e6, 0x00000000); + nv_icmd(priv, 0x000008e7, 0x00000000); + nv_icmd(priv, 0x000008e8, 0x00000000); + nv_icmd(priv, 0x000008e9, 0x00000000); + nv_icmd(priv, 0x000008ea, 0x00000000); + nv_icmd(priv, 0x000008eb, 0x00000000); + nv_icmd(priv, 0x000008ec, 0x00000000); + nv_icmd(priv, 0x000008ed, 0x00000000); + nv_icmd(priv, 0x000008ee, 0x00000000); + nv_icmd(priv, 0x000008ef, 0x00000000); + nv_icmd(priv, 0x000008a0, 0x00000000); + nv_icmd(priv, 0x000008a1, 0x00000000); + nv_icmd(priv, 
0x000008a2, 0x00000000); + nv_icmd(priv, 0x000008a3, 0x00000000); + nv_icmd(priv, 0x000008a4, 0x00000000); + nv_icmd(priv, 0x000008a5, 0x00000000); + nv_icmd(priv, 0x000008a6, 0x00000000); + nv_icmd(priv, 0x000008a7, 0x00000000); + nv_icmd(priv, 0x000008a8, 0x00000000); + nv_icmd(priv, 0x000008a9, 0x00000000); + nv_icmd(priv, 0x000008aa, 0x00000000); + nv_icmd(priv, 0x000008ab, 0x00000000); + nv_icmd(priv, 0x000008ac, 0x00000000); + nv_icmd(priv, 0x000008ad, 0x00000000); + nv_icmd(priv, 0x000008ae, 0x00000000); + nv_icmd(priv, 0x000008af, 0x00000000); + nv_icmd(priv, 0x000008f0, 0x00000000); + nv_icmd(priv, 0x000008f1, 0x00000000); + nv_icmd(priv, 0x000008f2, 0x00000000); + nv_icmd(priv, 0x000008f3, 0x00000000); + nv_icmd(priv, 0x000008f4, 0x00000000); + nv_icmd(priv, 0x000008f5, 0x00000000); + nv_icmd(priv, 0x000008f6, 0x00000000); + nv_icmd(priv, 0x000008f7, 0x00000000); + nv_icmd(priv, 0x000008f8, 0x00000000); + nv_icmd(priv, 0x000008f9, 0x00000000); + nv_icmd(priv, 0x000008fa, 0x00000000); + nv_icmd(priv, 0x000008fb, 0x00000000); + nv_icmd(priv, 0x000008fc, 0x00000000); + nv_icmd(priv, 0x000008fd, 0x00000000); + nv_icmd(priv, 0x000008fe, 0x00000000); + nv_icmd(priv, 0x000008ff, 0x00000000); + nv_icmd(priv, 0x0000094c, 0x000000ff); + nv_icmd(priv, 0x0000094d, 0xffffffff); + nv_icmd(priv, 0x0000094e, 0x00000002); + nv_icmd(priv, 0x000002ec, 0x00000001); + nv_icmd(priv, 0x00000303, 0x00000001); + nv_icmd(priv, 0x000002e6, 0x00000001); + nv_icmd(priv, 0x00000466, 0x00000052); + nv_icmd(priv, 0x00000301, 0x3f800000); + nv_icmd(priv, 0x00000304, 0x30201000); + nv_icmd(priv, 0x00000305, 0x70605040); + nv_icmd(priv, 0x00000306, 0xb8a89888); + nv_icmd(priv, 0x00000307, 0xf8e8d8c8); + nv_icmd(priv, 0x0000030a, 0x00ffff00); + nv_icmd(priv, 0x0000030b, 0x0000001a); + nv_icmd(priv, 0x0000030c, 0x00000001); + nv_icmd(priv, 0x00000318, 0x00000001); + nv_icmd(priv, 0x00000340, 0x00000000); + nv_icmd(priv, 0x00000375, 0x00000001); + nv_icmd(priv, 0x00000351, 0x00000100); + nv_icmd(priv, 0x0000037d, 0x00000006); + nv_icmd(priv, 0x000003a0, 0x00000002); + nv_icmd(priv, 0x000003aa, 0x00000001); + nv_icmd(priv, 0x000003a9, 0x00000001); + nv_icmd(priv, 0x00000380, 0x00000001); + nv_icmd(priv, 0x00000360, 0x00000040); + nv_icmd(priv, 0x00000366, 0x00000000); + nv_icmd(priv, 0x00000367, 0x00000000); + nv_icmd(priv, 0x00000368, 0x00001fff); + nv_icmd(priv, 0x00000370, 0x00000000); + nv_icmd(priv, 0x00000371, 0x00000000); + nv_icmd(priv, 0x00000372, 0x003fffff); + nv_icmd(priv, 0x0000037a, 0x00000012); + nv_icmd(priv, 0x000005e0, 0x00000022); + nv_icmd(priv, 0x000005e1, 0x00000022); + nv_icmd(priv, 0x000005e2, 0x00000022); + nv_icmd(priv, 0x000005e3, 0x00000022); + nv_icmd(priv, 0x000005e4, 0x00000022); + nv_icmd(priv, 0x00000619, 0x00000003); + nv_icmd(priv, 0x00000811, 0x00000003); + nv_icmd(priv, 0x00000812, 0x00000004); + nv_icmd(priv, 0x00000813, 0x00000006); + nv_icmd(priv, 0x00000814, 0x00000008); + nv_icmd(priv, 0x00000815, 0x0000000b); + nv_icmd(priv, 0x00000800, 0x00000001); + nv_icmd(priv, 0x00000801, 0x00000001); + nv_icmd(priv, 0x00000802, 0x00000001); + nv_icmd(priv, 0x00000803, 0x00000001); + nv_icmd(priv, 0x00000804, 0x00000001); + nv_icmd(priv, 0x00000805, 0x00000001); + nv_icmd(priv, 0x00000632, 0x00000001); + nv_icmd(priv, 0x00000633, 0x00000002); + nv_icmd(priv, 0x00000634, 0x00000003); + nv_icmd(priv, 0x00000635, 0x00000004); + nv_icmd(priv, 0x00000654, 0x3f800000); + nv_icmd(priv, 0x00000657, 0x3f800000); + nv_icmd(priv, 0x00000655, 0x3f800000); + nv_icmd(priv, 0x00000656, 0x3f800000); + 
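+ /* 0x3f800000 is the IEEE-754 encoding of 1.0f; entries written with
+  * it (0x654-0x657 above, 0x6cd, 0x7f5 and 0x510-0x51f below)
+  * presumably default floating-point state to 1.0.
+  */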
nv_icmd(priv, 0x000006cd, 0x3f800000); + nv_icmd(priv, 0x000007f5, 0x3f800000); + nv_icmd(priv, 0x000007dc, 0x39291909); + nv_icmd(priv, 0x000007dd, 0x79695949); + nv_icmd(priv, 0x000007de, 0xb9a99989); + nv_icmd(priv, 0x000007df, 0xf9e9d9c9); + nv_icmd(priv, 0x000007e8, 0x00003210); + nv_icmd(priv, 0x000007e9, 0x00007654); + nv_icmd(priv, 0x000007ea, 0x00000098); + nv_icmd(priv, 0x000007ec, 0x39291909); + nv_icmd(priv, 0x000007ed, 0x79695949); + nv_icmd(priv, 0x000007ee, 0xb9a99989); + nv_icmd(priv, 0x000007ef, 0xf9e9d9c9); + nv_icmd(priv, 0x000007f0, 0x00003210); + nv_icmd(priv, 0x000007f1, 0x00007654); + nv_icmd(priv, 0x000007f2, 0x00000098); + nv_icmd(priv, 0x000005a5, 0x00000001); + nv_icmd(priv, 0x00000980, 0x00000000); + nv_icmd(priv, 0x00000981, 0x00000000); + nv_icmd(priv, 0x00000982, 0x00000000); + nv_icmd(priv, 0x00000983, 0x00000000); + nv_icmd(priv, 0x00000984, 0x00000000); + nv_icmd(priv, 0x00000985, 0x00000000); + nv_icmd(priv, 0x00000986, 0x00000000); + nv_icmd(priv, 0x00000987, 0x00000000); + nv_icmd(priv, 0x00000988, 0x00000000); + nv_icmd(priv, 0x00000989, 0x00000000); + nv_icmd(priv, 0x0000098a, 0x00000000); + nv_icmd(priv, 0x0000098b, 0x00000000); + nv_icmd(priv, 0x0000098c, 0x00000000); + nv_icmd(priv, 0x0000098d, 0x00000000); + nv_icmd(priv, 0x0000098e, 0x00000000); + nv_icmd(priv, 0x0000098f, 0x00000000); + nv_icmd(priv, 0x00000990, 0x00000000); + nv_icmd(priv, 0x00000991, 0x00000000); + nv_icmd(priv, 0x00000992, 0x00000000); + nv_icmd(priv, 0x00000993, 0x00000000); + nv_icmd(priv, 0x00000994, 0x00000000); + nv_icmd(priv, 0x00000995, 0x00000000); + nv_icmd(priv, 0x00000996, 0x00000000); + nv_icmd(priv, 0x00000997, 0x00000000); + nv_icmd(priv, 0x00000998, 0x00000000); + nv_icmd(priv, 0x00000999, 0x00000000); + nv_icmd(priv, 0x0000099a, 0x00000000); + nv_icmd(priv, 0x0000099b, 0x00000000); + nv_icmd(priv, 0x0000099c, 0x00000000); + nv_icmd(priv, 0x0000099d, 0x00000000); + nv_icmd(priv, 0x0000099e, 0x00000000); + nv_icmd(priv, 0x0000099f, 0x00000000); + nv_icmd(priv, 0x000009a0, 0x00000000); + nv_icmd(priv, 0x000009a1, 0x00000000); + nv_icmd(priv, 0x000009a2, 0x00000000); + nv_icmd(priv, 0x000009a3, 0x00000000); + nv_icmd(priv, 0x000009a4, 0x00000000); + nv_icmd(priv, 0x000009a5, 0x00000000); + nv_icmd(priv, 0x000009a6, 0x00000000); + nv_icmd(priv, 0x000009a7, 0x00000000); + nv_icmd(priv, 0x000009a8, 0x00000000); + nv_icmd(priv, 0x000009a9, 0x00000000); + nv_icmd(priv, 0x000009aa, 0x00000000); + nv_icmd(priv, 0x000009ab, 0x00000000); + nv_icmd(priv, 0x000009ac, 0x00000000); + nv_icmd(priv, 0x000009ad, 0x00000000); + nv_icmd(priv, 0x000009ae, 0x00000000); + nv_icmd(priv, 0x000009af, 0x00000000); + nv_icmd(priv, 0x000009b0, 0x00000000); + nv_icmd(priv, 0x000009b1, 0x00000000); + nv_icmd(priv, 0x000009b2, 0x00000000); + nv_icmd(priv, 0x000009b3, 0x00000000); + nv_icmd(priv, 0x000009b4, 0x00000000); + nv_icmd(priv, 0x000009b5, 0x00000000); + nv_icmd(priv, 0x000009b6, 0x00000000); + nv_icmd(priv, 0x000009b7, 0x00000000); + nv_icmd(priv, 0x000009b8, 0x00000000); + nv_icmd(priv, 0x000009b9, 0x00000000); + nv_icmd(priv, 0x000009ba, 0x00000000); + nv_icmd(priv, 0x000009bb, 0x00000000); + nv_icmd(priv, 0x000009bc, 0x00000000); + nv_icmd(priv, 0x000009bd, 0x00000000); + nv_icmd(priv, 0x000009be, 0x00000000); + nv_icmd(priv, 0x000009bf, 0x00000000); + nv_icmd(priv, 0x000009c0, 0x00000000); + nv_icmd(priv, 0x000009c1, 0x00000000); + nv_icmd(priv, 0x000009c2, 0x00000000); + nv_icmd(priv, 0x000009c3, 0x00000000); + nv_icmd(priv, 0x000009c4, 0x00000000); + nv_icmd(priv, 0x000009c5, 
0x00000000); + nv_icmd(priv, 0x000009c6, 0x00000000); + nv_icmd(priv, 0x000009c7, 0x00000000); + nv_icmd(priv, 0x000009c8, 0x00000000); + nv_icmd(priv, 0x000009c9, 0x00000000); + nv_icmd(priv, 0x000009ca, 0x00000000); + nv_icmd(priv, 0x000009cb, 0x00000000); + nv_icmd(priv, 0x000009cc, 0x00000000); + nv_icmd(priv, 0x000009cd, 0x00000000); + nv_icmd(priv, 0x000009ce, 0x00000000); + nv_icmd(priv, 0x000009cf, 0x00000000); + nv_icmd(priv, 0x000009d0, 0x00000000); + nv_icmd(priv, 0x000009d1, 0x00000000); + nv_icmd(priv, 0x000009d2, 0x00000000); + nv_icmd(priv, 0x000009d3, 0x00000000); + nv_icmd(priv, 0x000009d4, 0x00000000); + nv_icmd(priv, 0x000009d5, 0x00000000); + nv_icmd(priv, 0x000009d6, 0x00000000); + nv_icmd(priv, 0x000009d7, 0x00000000); + nv_icmd(priv, 0x000009d8, 0x00000000); + nv_icmd(priv, 0x000009d9, 0x00000000); + nv_icmd(priv, 0x000009da, 0x00000000); + nv_icmd(priv, 0x000009db, 0x00000000); + nv_icmd(priv, 0x000009dc, 0x00000000); + nv_icmd(priv, 0x000009dd, 0x00000000); + nv_icmd(priv, 0x000009de, 0x00000000); + nv_icmd(priv, 0x000009df, 0x00000000); + nv_icmd(priv, 0x000009e0, 0x00000000); + nv_icmd(priv, 0x000009e1, 0x00000000); + nv_icmd(priv, 0x000009e2, 0x00000000); + nv_icmd(priv, 0x000009e3, 0x00000000); + nv_icmd(priv, 0x000009e4, 0x00000000); + nv_icmd(priv, 0x000009e5, 0x00000000); + nv_icmd(priv, 0x000009e6, 0x00000000); + nv_icmd(priv, 0x000009e7, 0x00000000); + nv_icmd(priv, 0x000009e8, 0x00000000); + nv_icmd(priv, 0x000009e9, 0x00000000); + nv_icmd(priv, 0x000009ea, 0x00000000); + nv_icmd(priv, 0x000009eb, 0x00000000); + nv_icmd(priv, 0x000009ec, 0x00000000); + nv_icmd(priv, 0x000009ed, 0x00000000); + nv_icmd(priv, 0x000009ee, 0x00000000); + nv_icmd(priv, 0x000009ef, 0x00000000); + nv_icmd(priv, 0x000009f0, 0x00000000); + nv_icmd(priv, 0x000009f1, 0x00000000); + nv_icmd(priv, 0x000009f2, 0x00000000); + nv_icmd(priv, 0x000009f3, 0x00000000); + nv_icmd(priv, 0x000009f4, 0x00000000); + nv_icmd(priv, 0x000009f5, 0x00000000); + nv_icmd(priv, 0x000009f6, 0x00000000); + nv_icmd(priv, 0x000009f7, 0x00000000); + nv_icmd(priv, 0x000009f8, 0x00000000); + nv_icmd(priv, 0x000009f9, 0x00000000); + nv_icmd(priv, 0x000009fa, 0x00000000); + nv_icmd(priv, 0x000009fb, 0x00000000); + nv_icmd(priv, 0x000009fc, 0x00000000); + nv_icmd(priv, 0x000009fd, 0x00000000); + nv_icmd(priv, 0x000009fe, 0x00000000); + nv_icmd(priv, 0x000009ff, 0x00000000); + nv_icmd(priv, 0x00000468, 0x00000004); + nv_icmd(priv, 0x0000046c, 0x00000001); + nv_icmd(priv, 0x00000470, 0x00000000); + nv_icmd(priv, 0x00000471, 0x00000000); + nv_icmd(priv, 0x00000472, 0x00000000); + nv_icmd(priv, 0x00000473, 0x00000000); + nv_icmd(priv, 0x00000474, 0x00000000); + nv_icmd(priv, 0x00000475, 0x00000000); + nv_icmd(priv, 0x00000476, 0x00000000); + nv_icmd(priv, 0x00000477, 0x00000000); + nv_icmd(priv, 0x00000478, 0x00000000); + nv_icmd(priv, 0x00000479, 0x00000000); + nv_icmd(priv, 0x0000047a, 0x00000000); + nv_icmd(priv, 0x0000047b, 0x00000000); + nv_icmd(priv, 0x0000047c, 0x00000000); + nv_icmd(priv, 0x0000047d, 0x00000000); + nv_icmd(priv, 0x0000047e, 0x00000000); + nv_icmd(priv, 0x0000047f, 0x00000000); + nv_icmd(priv, 0x00000480, 0x00000000); + nv_icmd(priv, 0x00000481, 0x00000000); + nv_icmd(priv, 0x00000482, 0x00000000); + nv_icmd(priv, 0x00000483, 0x00000000); + nv_icmd(priv, 0x00000484, 0x00000000); + nv_icmd(priv, 0x00000485, 0x00000000); + nv_icmd(priv, 0x00000486, 0x00000000); + nv_icmd(priv, 0x00000487, 0x00000000); + nv_icmd(priv, 0x00000488, 0x00000000); + nv_icmd(priv, 0x00000489, 0x00000000); + nv_icmd(priv, 
0x0000048a, 0x00000000); + nv_icmd(priv, 0x0000048b, 0x00000000); + nv_icmd(priv, 0x0000048c, 0x00000000); + nv_icmd(priv, 0x0000048d, 0x00000000); + nv_icmd(priv, 0x0000048e, 0x00000000); + nv_icmd(priv, 0x0000048f, 0x00000000); + nv_icmd(priv, 0x00000490, 0x00000000); + nv_icmd(priv, 0x00000491, 0x00000000); + nv_icmd(priv, 0x00000492, 0x00000000); + nv_icmd(priv, 0x00000493, 0x00000000); + nv_icmd(priv, 0x00000494, 0x00000000); + nv_icmd(priv, 0x00000495, 0x00000000); + nv_icmd(priv, 0x00000496, 0x00000000); + nv_icmd(priv, 0x00000497, 0x00000000); + nv_icmd(priv, 0x00000498, 0x00000000); + nv_icmd(priv, 0x00000499, 0x00000000); + nv_icmd(priv, 0x0000049a, 0x00000000); + nv_icmd(priv, 0x0000049b, 0x00000000); + nv_icmd(priv, 0x0000049c, 0x00000000); + nv_icmd(priv, 0x0000049d, 0x00000000); + nv_icmd(priv, 0x0000049e, 0x00000000); + nv_icmd(priv, 0x0000049f, 0x00000000); + nv_icmd(priv, 0x000004a0, 0x00000000); + nv_icmd(priv, 0x000004a1, 0x00000000); + nv_icmd(priv, 0x000004a2, 0x00000000); + nv_icmd(priv, 0x000004a3, 0x00000000); + nv_icmd(priv, 0x000004a4, 0x00000000); + nv_icmd(priv, 0x000004a5, 0x00000000); + nv_icmd(priv, 0x000004a6, 0x00000000); + nv_icmd(priv, 0x000004a7, 0x00000000); + nv_icmd(priv, 0x000004a8, 0x00000000); + nv_icmd(priv, 0x000004a9, 0x00000000); + nv_icmd(priv, 0x000004aa, 0x00000000); + nv_icmd(priv, 0x000004ab, 0x00000000); + nv_icmd(priv, 0x000004ac, 0x00000000); + nv_icmd(priv, 0x000004ad, 0x00000000); + nv_icmd(priv, 0x000004ae, 0x00000000); + nv_icmd(priv, 0x000004af, 0x00000000); + nv_icmd(priv, 0x000004b0, 0x00000000); + nv_icmd(priv, 0x000004b1, 0x00000000); + nv_icmd(priv, 0x000004b2, 0x00000000); + nv_icmd(priv, 0x000004b3, 0x00000000); + nv_icmd(priv, 0x000004b4, 0x00000000); + nv_icmd(priv, 0x000004b5, 0x00000000); + nv_icmd(priv, 0x000004b6, 0x00000000); + nv_icmd(priv, 0x000004b7, 0x00000000); + nv_icmd(priv, 0x000004b8, 0x00000000); + nv_icmd(priv, 0x000004b9, 0x00000000); + nv_icmd(priv, 0x000004ba, 0x00000000); + nv_icmd(priv, 0x000004bb, 0x00000000); + nv_icmd(priv, 0x000004bc, 0x00000000); + nv_icmd(priv, 0x000004bd, 0x00000000); + nv_icmd(priv, 0x000004be, 0x00000000); + nv_icmd(priv, 0x000004bf, 0x00000000); + nv_icmd(priv, 0x000004c0, 0x00000000); + nv_icmd(priv, 0x000004c1, 0x00000000); + nv_icmd(priv, 0x000004c2, 0x00000000); + nv_icmd(priv, 0x000004c3, 0x00000000); + nv_icmd(priv, 0x000004c4, 0x00000000); + nv_icmd(priv, 0x000004c5, 0x00000000); + nv_icmd(priv, 0x000004c6, 0x00000000); + nv_icmd(priv, 0x000004c7, 0x00000000); + nv_icmd(priv, 0x000004c8, 0x00000000); + nv_icmd(priv, 0x000004c9, 0x00000000); + nv_icmd(priv, 0x000004ca, 0x00000000); + nv_icmd(priv, 0x000004cb, 0x00000000); + nv_icmd(priv, 0x000004cc, 0x00000000); + nv_icmd(priv, 0x000004cd, 0x00000000); + nv_icmd(priv, 0x000004ce, 0x00000000); + nv_icmd(priv, 0x000004cf, 0x00000000); + nv_icmd(priv, 0x00000510, 0x3f800000); + nv_icmd(priv, 0x00000511, 0x3f800000); + nv_icmd(priv, 0x00000512, 0x3f800000); + nv_icmd(priv, 0x00000513, 0x3f800000); + nv_icmd(priv, 0x00000514, 0x3f800000); + nv_icmd(priv, 0x00000515, 0x3f800000); + nv_icmd(priv, 0x00000516, 0x3f800000); + nv_icmd(priv, 0x00000517, 0x3f800000); + nv_icmd(priv, 0x00000518, 0x3f800000); + nv_icmd(priv, 0x00000519, 0x3f800000); + nv_icmd(priv, 0x0000051a, 0x3f800000); + nv_icmd(priv, 0x0000051b, 0x3f800000); + nv_icmd(priv, 0x0000051c, 0x3f800000); + nv_icmd(priv, 0x0000051d, 0x3f800000); + nv_icmd(priv, 0x0000051e, 0x3f800000); + nv_icmd(priv, 0x0000051f, 0x3f800000); + nv_icmd(priv, 0x00000520, 0x000002b6); + 
nv_icmd(priv, 0x00000529, 0x00000001); + nv_icmd(priv, 0x00000530, 0xffff0000); + nv_icmd(priv, 0x00000531, 0xffff0000); + nv_icmd(priv, 0x00000532, 0xffff0000); + nv_icmd(priv, 0x00000533, 0xffff0000); + nv_icmd(priv, 0x00000534, 0xffff0000); + nv_icmd(priv, 0x00000535, 0xffff0000); + nv_icmd(priv, 0x00000536, 0xffff0000); + nv_icmd(priv, 0x00000537, 0xffff0000); + nv_icmd(priv, 0x00000538, 0xffff0000); + nv_icmd(priv, 0x00000539, 0xffff0000); + nv_icmd(priv, 0x0000053a, 0xffff0000); + nv_icmd(priv, 0x0000053b, 0xffff0000); + nv_icmd(priv, 0x0000053c, 0xffff0000); + nv_icmd(priv, 0x0000053d, 0xffff0000); + nv_icmd(priv, 0x0000053e, 0xffff0000); + nv_icmd(priv, 0x0000053f, 0xffff0000); + nv_icmd(priv, 0x00000585, 0x0000003f); + nv_icmd(priv, 0x00000576, 0x00000003); + if (nv_device(priv)->chipset == 0xc1 || + nv_device(priv)->chipset >= 0xd0) + nv_icmd(priv, 0x0000057b, 0x00000059); + nv_icmd(priv, 0x00000586, 0x00000040); + nv_icmd(priv, 0x00000582, 0x00000080); + nv_icmd(priv, 0x00000583, 0x00000080); + nv_icmd(priv, 0x000005c2, 0x00000001); + nv_icmd(priv, 0x00000638, 0x00000001); + nv_icmd(priv, 0x00000639, 0x00000001); + nv_icmd(priv, 0x0000063a, 0x00000002); + nv_icmd(priv, 0x0000063b, 0x00000001); + nv_icmd(priv, 0x0000063c, 0x00000001); + nv_icmd(priv, 0x0000063d, 0x00000002); + nv_icmd(priv, 0x0000063e, 0x00000001); + nv_icmd(priv, 0x000008b8, 0x00000001); + nv_icmd(priv, 0x000008b9, 0x00000001); + nv_icmd(priv, 0x000008ba, 0x00000001); + nv_icmd(priv, 0x000008bb, 0x00000001); + nv_icmd(priv, 0x000008bc, 0x00000001); + nv_icmd(priv, 0x000008bd, 0x00000001); + nv_icmd(priv, 0x000008be, 0x00000001); + nv_icmd(priv, 0x000008bf, 0x00000001); + nv_icmd(priv, 0x00000900, 0x00000001); + nv_icmd(priv, 0x00000901, 0x00000001); + nv_icmd(priv, 0x00000902, 0x00000001); + nv_icmd(priv, 0x00000903, 0x00000001); + nv_icmd(priv, 0x00000904, 0x00000001); + nv_icmd(priv, 0x00000905, 0x00000001); + nv_icmd(priv, 0x00000906, 0x00000001); + nv_icmd(priv, 0x00000907, 0x00000001); + nv_icmd(priv, 0x00000908, 0x00000002); + nv_icmd(priv, 0x00000909, 0x00000002); + nv_icmd(priv, 0x0000090a, 0x00000002); + nv_icmd(priv, 0x0000090b, 0x00000002); + nv_icmd(priv, 0x0000090c, 0x00000002); + nv_icmd(priv, 0x0000090d, 0x00000002); + nv_icmd(priv, 0x0000090e, 0x00000002); + nv_icmd(priv, 0x0000090f, 0x00000002); + nv_icmd(priv, 0x00000910, 0x00000001); + nv_icmd(priv, 0x00000911, 0x00000001); + nv_icmd(priv, 0x00000912, 0x00000001); + nv_icmd(priv, 0x00000913, 0x00000001); + nv_icmd(priv, 0x00000914, 0x00000001); + nv_icmd(priv, 0x00000915, 0x00000001); + nv_icmd(priv, 0x00000916, 0x00000001); + nv_icmd(priv, 0x00000917, 0x00000001); + nv_icmd(priv, 0x00000918, 0x00000001); + nv_icmd(priv, 0x00000919, 0x00000001); + nv_icmd(priv, 0x0000091a, 0x00000001); + nv_icmd(priv, 0x0000091b, 0x00000001); + nv_icmd(priv, 0x0000091c, 0x00000001); + nv_icmd(priv, 0x0000091d, 0x00000001); + nv_icmd(priv, 0x0000091e, 0x00000001); + nv_icmd(priv, 0x0000091f, 0x00000001); + nv_icmd(priv, 0x00000920, 0x00000002); + nv_icmd(priv, 0x00000921, 0x00000002); + nv_icmd(priv, 0x00000922, 0x00000002); + nv_icmd(priv, 0x00000923, 0x00000002); + nv_icmd(priv, 0x00000924, 0x00000002); + nv_icmd(priv, 0x00000925, 0x00000002); + nv_icmd(priv, 0x00000926, 0x00000002); + nv_icmd(priv, 0x00000927, 0x00000002); + nv_icmd(priv, 0x00000928, 0x00000001); + nv_icmd(priv, 0x00000929, 0x00000001); + nv_icmd(priv, 0x0000092a, 0x00000001); + nv_icmd(priv, 0x0000092b, 0x00000001); + nv_icmd(priv, 0x0000092c, 0x00000001); + nv_icmd(priv, 0x0000092d, 
0x00000001); + nv_icmd(priv, 0x0000092e, 0x00000001); + nv_icmd(priv, 0x0000092f, 0x00000001); + nv_icmd(priv, 0x00000648, 0x00000001); + nv_icmd(priv, 0x00000649, 0x00000001); + nv_icmd(priv, 0x0000064a, 0x00000001); + nv_icmd(priv, 0x0000064b, 0x00000001); + nv_icmd(priv, 0x0000064c, 0x00000001); + nv_icmd(priv, 0x0000064d, 0x00000001); + nv_icmd(priv, 0x0000064e, 0x00000001); + nv_icmd(priv, 0x0000064f, 0x00000001); + nv_icmd(priv, 0x00000650, 0x00000001); + nv_icmd(priv, 0x00000658, 0x0000000f); + nv_icmd(priv, 0x000007ff, 0x0000000a); + nv_icmd(priv, 0x0000066a, 0x40000000); + nv_icmd(priv, 0x0000066b, 0x10000000); + nv_icmd(priv, 0x0000066c, 0xffff0000); + nv_icmd(priv, 0x0000066d, 0xffff0000); + nv_icmd(priv, 0x000007af, 0x00000008); + nv_icmd(priv, 0x000007b0, 0x00000008); + nv_icmd(priv, 0x000007f6, 0x00000001); + nv_icmd(priv, 0x000006b2, 0x00000055); + nv_icmd(priv, 0x000007ad, 0x00000003); + nv_icmd(priv, 0x00000937, 0x00000001); + nv_icmd(priv, 0x00000971, 0x00000008); + nv_icmd(priv, 0x00000972, 0x00000040); + nv_icmd(priv, 0x00000973, 0x0000012c); + nv_icmd(priv, 0x0000097c, 0x00000040); + nv_icmd(priv, 0x00000979, 0x00000003); + nv_icmd(priv, 0x00000975, 0x00000020); + nv_icmd(priv, 0x00000976, 0x00000001); + nv_icmd(priv, 0x00000977, 0x00000020); + nv_icmd(priv, 0x00000978, 0x00000001); + nv_icmd(priv, 0x00000957, 0x00000003); + nv_icmd(priv, 0x0000095e, 0x20164010); + nv_icmd(priv, 0x0000095f, 0x00000020); + if (nv_device(priv)->chipset >= 0xd0) + nv_icmd(priv, 0x0000097d, 0x00000020); + nv_icmd(priv, 0x00000683, 0x00000006); + nv_icmd(priv, 0x00000685, 0x003fffff); + nv_icmd(priv, 0x00000687, 0x00000c48); + nv_icmd(priv, 0x000006a0, 0x00000005); + nv_icmd(priv, 0x00000840, 0x00300008); + nv_icmd(priv, 0x00000841, 0x04000080); + nv_icmd(priv, 0x00000842, 0x00300008); + nv_icmd(priv, 0x00000843, 0x04000080); + nv_icmd(priv, 0x00000818, 0x00000000); + nv_icmd(priv, 0x00000819, 0x00000000); + nv_icmd(priv, 0x0000081a, 0x00000000); + nv_icmd(priv, 0x0000081b, 0x00000000); + nv_icmd(priv, 0x0000081c, 0x00000000); + nv_icmd(priv, 0x0000081d, 0x00000000); + nv_icmd(priv, 0x0000081e, 0x00000000); + nv_icmd(priv, 0x0000081f, 0x00000000); + nv_icmd(priv, 0x00000848, 0x00000000); + nv_icmd(priv, 0x00000849, 0x00000000); + nv_icmd(priv, 0x0000084a, 0x00000000); + nv_icmd(priv, 0x0000084b, 0x00000000); + nv_icmd(priv, 0x0000084c, 0x00000000); + nv_icmd(priv, 0x0000084d, 0x00000000); + nv_icmd(priv, 0x0000084e, 0x00000000); + nv_icmd(priv, 0x0000084f, 0x00000000); + nv_icmd(priv, 0x00000850, 0x00000000); + nv_icmd(priv, 0x00000851, 0x00000000); + nv_icmd(priv, 0x00000852, 0x00000000); + nv_icmd(priv, 0x00000853, 0x00000000); + nv_icmd(priv, 0x00000854, 0x00000000); + nv_icmd(priv, 0x00000855, 0x00000000); + nv_icmd(priv, 0x00000856, 0x00000000); + nv_icmd(priv, 0x00000857, 0x00000000); + nv_icmd(priv, 0x00000738, 0x00000000); + nv_icmd(priv, 0x000006aa, 0x00000001); + nv_icmd(priv, 0x000006ab, 0x00000002); + nv_icmd(priv, 0x000006ac, 0x00000080); + nv_icmd(priv, 0x000006ad, 0x00000100); + nv_icmd(priv, 0x000006ae, 0x00000100); + nv_icmd(priv, 0x000006b1, 0x00000011); + nv_icmd(priv, 0x000006bb, 0x000000cf); + nv_icmd(priv, 0x000006ce, 0x2a712488); + nv_icmd(priv, 0x00000739, 0x4085c000); + nv_icmd(priv, 0x0000073a, 0x00000080); + nv_icmd(priv, 0x00000786, 0x80000100); + nv_icmd(priv, 0x0000073c, 0x00010100); + nv_icmd(priv, 0x0000073d, 0x02800000); + nv_icmd(priv, 0x00000787, 0x000000cf); + nv_icmd(priv, 0x0000078c, 0x00000008); + nv_icmd(priv, 0x00000792, 0x00000001); + nv_icmd(priv, 
0x00000794, 0x00000001); + nv_icmd(priv, 0x00000795, 0x00000001); + nv_icmd(priv, 0x00000796, 0x00000001); + nv_icmd(priv, 0x00000797, 0x000000cf); + nv_icmd(priv, 0x00000836, 0x00000001); + nv_icmd(priv, 0x0000079a, 0x00000002); + nv_icmd(priv, 0x00000833, 0x04444480); + nv_icmd(priv, 0x000007a1, 0x00000001); + nv_icmd(priv, 0x000007a3, 0x00000001); + nv_icmd(priv, 0x000007a4, 0x00000001); + nv_icmd(priv, 0x000007a5, 0x00000001); + nv_icmd(priv, 0x00000831, 0x00000004); + nv_icmd(priv, 0x0000080c, 0x00000002); + nv_icmd(priv, 0x0000080d, 0x00000100); + nv_icmd(priv, 0x0000080e, 0x00000100); + nv_icmd(priv, 0x0000080f, 0x00000001); + nv_icmd(priv, 0x00000823, 0x00000002); + nv_icmd(priv, 0x00000824, 0x00000100); + nv_icmd(priv, 0x00000825, 0x00000100); + nv_icmd(priv, 0x00000826, 0x00000001); + nv_icmd(priv, 0x0000095d, 0x00000001); + nv_icmd(priv, 0x0000082b, 0x00000004); + nv_icmd(priv, 0x00000942, 0x00010001); + nv_icmd(priv, 0x00000943, 0x00000001); + nv_icmd(priv, 0x00000944, 0x00000022); + nv_icmd(priv, 0x000007c5, 0x00010001); + nv_icmd(priv, 0x00000834, 0x00000001); + nv_icmd(priv, 0x000007c7, 0x00000001); + nv_icmd(priv, 0x0000c1b0, 0x0000000f); + nv_icmd(priv, 0x0000c1b1, 0x0000000f); + nv_icmd(priv, 0x0000c1b2, 0x0000000f); + nv_icmd(priv, 0x0000c1b3, 0x0000000f); + nv_icmd(priv, 0x0000c1b4, 0x0000000f); + nv_icmd(priv, 0x0000c1b5, 0x0000000f); + nv_icmd(priv, 0x0000c1b6, 0x0000000f); + nv_icmd(priv, 0x0000c1b7, 0x0000000f); + nv_icmd(priv, 0x0000c1b8, 0x0fac6881); + nv_icmd(priv, 0x0000c1b9, 0x00fac688); + nv_icmd(priv, 0x0001e100, 0x00000001); + nv_icmd(priv, 0x00001000, 0x00000002); + nv_icmd(priv, 0x000006aa, 0x00000001); + nv_icmd(priv, 0x000006ad, 0x00000100); + nv_icmd(priv, 0x000006ae, 0x00000100); + nv_icmd(priv, 0x000006b1, 0x00000011); + nv_icmd(priv, 0x0000078c, 0x00000008); + nv_icmd(priv, 0x00000792, 0x00000001); + nv_icmd(priv, 0x00000794, 0x00000001); + nv_icmd(priv, 0x00000795, 0x00000001); + nv_icmd(priv, 0x00000796, 0x00000001); + nv_icmd(priv, 0x00000797, 0x000000cf); + nv_icmd(priv, 0x0000079a, 0x00000002); + nv_icmd(priv, 0x00000833, 0x04444480); + nv_icmd(priv, 0x000007a1, 0x00000001); + nv_icmd(priv, 0x000007a3, 0x00000001); + nv_icmd(priv, 0x000007a4, 0x00000001); + nv_icmd(priv, 0x000007a5, 0x00000001); + nv_icmd(priv, 0x00000831, 0x00000004); + nv_icmd(priv, 0x0001e100, 0x00000001); + nv_icmd(priv, 0x00001000, 0x00000014); + nv_icmd(priv, 0x00000351, 0x00000100); + nv_icmd(priv, 0x00000957, 0x00000003); + nv_icmd(priv, 0x0000095d, 0x00000001); + nv_icmd(priv, 0x0000082b, 0x00000004); + nv_icmd(priv, 0x00000942, 0x00010001); + nv_icmd(priv, 0x00000943, 0x00000001); + nv_icmd(priv, 0x000007c5, 0x00010001); + nv_icmd(priv, 0x00000834, 0x00000001); + nv_icmd(priv, 0x000007c7, 0x00000001); + nv_icmd(priv, 0x0001e100, 0x00000001); + nv_icmd(priv, 0x00001000, 0x00000001); + nv_icmd(priv, 0x0000080c, 0x00000002); + nv_icmd(priv, 0x0000080d, 0x00000100); + nv_icmd(priv, 0x0000080e, 0x00000100); + nv_icmd(priv, 0x0000080f, 0x00000001); + nv_icmd(priv, 0x00000823, 0x00000002); + nv_icmd(priv, 0x00000824, 0x00000100); + nv_icmd(priv, 0x00000825, 0x00000100); + nv_icmd(priv, 0x00000826, 0x00000001); + nv_icmd(priv, 0x0001e100, 0x00000001); + nv_wr32(priv, 0x400208, 0x00000000); + nv_wr32(priv, 0x404154, 0x00000400); + + nvc0_grctx_generate_9097(priv); + if (fermi >= 0x9197) + nvc0_grctx_generate_9197(priv); + if (fermi >= 0x9297) + nvc0_grctx_generate_9297(priv); + nvc0_grctx_generate_902d(priv); + nvc0_grctx_generate_9039(priv); + nvc0_grctx_generate_90c0(priv); + 
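+ /* Finish up: 0x000260 had bit 0 cleared at the start of
+  * nvc0_grctx_generate(); its saved value is restored below and the
+  * completed image is handed to nvc0_grctx_fini(), which presumably
+  * stores the generated golden context and releases the setup state.
+  */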
+ nv_wr32(priv, 0x000260, r000260); + + return nvc0_grctx_fini(&info); +} diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c new file mode 100644 index 0000000..ae27dae --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c @@ -0,0 +1,2793 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nvc0.h" + +static void +nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x400208, 0x80000000); + nv_icmd(priv, 0x001000, 0x00000004); + nv_icmd(priv, 0x000039, 0x00000000); + nv_icmd(priv, 0x00003a, 0x00000000); + nv_icmd(priv, 0x00003b, 0x00000000); + nv_icmd(priv, 0x0000a9, 0x0000ffff); + nv_icmd(priv, 0x000038, 0x0fac6881); + nv_icmd(priv, 0x00003d, 0x00000001); + nv_icmd(priv, 0x0000e8, 0x00000400); + nv_icmd(priv, 0x0000e9, 0x00000400); + nv_icmd(priv, 0x0000ea, 0x00000400); + nv_icmd(priv, 0x0000eb, 0x00000400); + nv_icmd(priv, 0x0000ec, 0x00000400); + nv_icmd(priv, 0x0000ed, 0x00000400); + nv_icmd(priv, 0x0000ee, 0x00000400); + nv_icmd(priv, 0x0000ef, 0x00000400); + nv_icmd(priv, 0x000078, 0x00000300); + nv_icmd(priv, 0x000079, 0x00000300); + nv_icmd(priv, 0x00007a, 0x00000300); + nv_icmd(priv, 0x00007b, 0x00000300); + nv_icmd(priv, 0x00007c, 0x00000300); + nv_icmd(priv, 0x00007d, 0x00000300); + nv_icmd(priv, 0x00007e, 0x00000300); + nv_icmd(priv, 0x00007f, 0x00000300); + nv_icmd(priv, 0x000050, 0x00000011); + nv_icmd(priv, 0x000058, 0x00000008); + nv_icmd(priv, 0x000059, 0x00000008); + nv_icmd(priv, 0x00005a, 0x00000008); + nv_icmd(priv, 0x00005b, 0x00000008); + nv_icmd(priv, 0x00005c, 0x00000008); + nv_icmd(priv, 0x00005d, 0x00000008); + nv_icmd(priv, 0x00005e, 0x00000008); + nv_icmd(priv, 0x00005f, 0x00000008); + nv_icmd(priv, 0x000208, 0x00000001); + nv_icmd(priv, 0x000209, 0x00000001); + nv_icmd(priv, 0x00020a, 0x00000001); + nv_icmd(priv, 0x00020b, 0x00000001); + nv_icmd(priv, 0x00020c, 0x00000001); + nv_icmd(priv, 0x00020d, 0x00000001); + nv_icmd(priv, 0x00020e, 0x00000001); + nv_icmd(priv, 0x00020f, 0x00000001); + nv_icmd(priv, 0x000081, 0x00000001); + nv_icmd(priv, 0x000085, 0x00000004); + nv_icmd(priv, 0x000088, 0x00000400); + nv_icmd(priv, 0x000090, 0x00000300); + nv_icmd(priv, 0x000098, 0x00001001); + nv_icmd(priv, 0x0000e3, 0x00000001); + nv_icmd(priv, 0x0000da, 0x00000001); + nv_icmd(priv, 0x0000f8, 0x00000003); + nv_icmd(priv, 0x0000fa, 0x00000001); + nv_icmd(priv, 
0x00009f, 0x0000ffff); + nv_icmd(priv, 0x0000a0, 0x0000ffff); + nv_icmd(priv, 0x0000a1, 0x0000ffff); + nv_icmd(priv, 0x0000a2, 0x0000ffff); + nv_icmd(priv, 0x0000b1, 0x00000001); + nv_icmd(priv, 0x0000ad, 0x0000013e); + nv_icmd(priv, 0x0000e1, 0x00000010); + nv_icmd(priv, 0x000290, 0x00000000); + nv_icmd(priv, 0x000291, 0x00000000); + nv_icmd(priv, 0x000292, 0x00000000); + nv_icmd(priv, 0x000293, 0x00000000); + nv_icmd(priv, 0x000294, 0x00000000); + nv_icmd(priv, 0x000295, 0x00000000); + nv_icmd(priv, 0x000296, 0x00000000); + nv_icmd(priv, 0x000297, 0x00000000); + nv_icmd(priv, 0x000298, 0x00000000); + nv_icmd(priv, 0x000299, 0x00000000); + nv_icmd(priv, 0x00029a, 0x00000000); + nv_icmd(priv, 0x00029b, 0x00000000); + nv_icmd(priv, 0x00029c, 0x00000000); + nv_icmd(priv, 0x00029d, 0x00000000); + nv_icmd(priv, 0x00029e, 0x00000000); + nv_icmd(priv, 0x00029f, 0x00000000); + nv_icmd(priv, 0x0003b0, 0x00000000); + nv_icmd(priv, 0x0003b1, 0x00000000); + nv_icmd(priv, 0x0003b2, 0x00000000); + nv_icmd(priv, 0x0003b3, 0x00000000); + nv_icmd(priv, 0x0003b4, 0x00000000); + nv_icmd(priv, 0x0003b5, 0x00000000); + nv_icmd(priv, 0x0003b6, 0x00000000); + nv_icmd(priv, 0x0003b7, 0x00000000); + nv_icmd(priv, 0x0003b8, 0x00000000); + nv_icmd(priv, 0x0003b9, 0x00000000); + nv_icmd(priv, 0x0003ba, 0x00000000); + nv_icmd(priv, 0x0003bb, 0x00000000); + nv_icmd(priv, 0x0003bc, 0x00000000); + nv_icmd(priv, 0x0003bd, 0x00000000); + nv_icmd(priv, 0x0003be, 0x00000000); + nv_icmd(priv, 0x0003bf, 0x00000000); + nv_icmd(priv, 0x0002a0, 0x00000000); + nv_icmd(priv, 0x0002a1, 0x00000000); + nv_icmd(priv, 0x0002a2, 0x00000000); + nv_icmd(priv, 0x0002a3, 0x00000000); + nv_icmd(priv, 0x0002a4, 0x00000000); + nv_icmd(priv, 0x0002a5, 0x00000000); + nv_icmd(priv, 0x0002a6, 0x00000000); + nv_icmd(priv, 0x0002a7, 0x00000000); + nv_icmd(priv, 0x0002a8, 0x00000000); + nv_icmd(priv, 0x0002a9, 0x00000000); + nv_icmd(priv, 0x0002aa, 0x00000000); + nv_icmd(priv, 0x0002ab, 0x00000000); + nv_icmd(priv, 0x0002ac, 0x00000000); + nv_icmd(priv, 0x0002ad, 0x00000000); + nv_icmd(priv, 0x0002ae, 0x00000000); + nv_icmd(priv, 0x0002af, 0x00000000); + nv_icmd(priv, 0x000420, 0x00000000); + nv_icmd(priv, 0x000421, 0x00000000); + nv_icmd(priv, 0x000422, 0x00000000); + nv_icmd(priv, 0x000423, 0x00000000); + nv_icmd(priv, 0x000424, 0x00000000); + nv_icmd(priv, 0x000425, 0x00000000); + nv_icmd(priv, 0x000426, 0x00000000); + nv_icmd(priv, 0x000427, 0x00000000); + nv_icmd(priv, 0x000428, 0x00000000); + nv_icmd(priv, 0x000429, 0x00000000); + nv_icmd(priv, 0x00042a, 0x00000000); + nv_icmd(priv, 0x00042b, 0x00000000); + nv_icmd(priv, 0x00042c, 0x00000000); + nv_icmd(priv, 0x00042d, 0x00000000); + nv_icmd(priv, 0x00042e, 0x00000000); + nv_icmd(priv, 0x00042f, 0x00000000); + nv_icmd(priv, 0x0002b0, 0x00000000); + nv_icmd(priv, 0x0002b1, 0x00000000); + nv_icmd(priv, 0x0002b2, 0x00000000); + nv_icmd(priv, 0x0002b3, 0x00000000); + nv_icmd(priv, 0x0002b4, 0x00000000); + nv_icmd(priv, 0x0002b5, 0x00000000); + nv_icmd(priv, 0x0002b6, 0x00000000); + nv_icmd(priv, 0x0002b7, 0x00000000); + nv_icmd(priv, 0x0002b8, 0x00000000); + nv_icmd(priv, 0x0002b9, 0x00000000); + nv_icmd(priv, 0x0002ba, 0x00000000); + nv_icmd(priv, 0x0002bb, 0x00000000); + nv_icmd(priv, 0x0002bc, 0x00000000); + nv_icmd(priv, 0x0002bd, 0x00000000); + nv_icmd(priv, 0x0002be, 0x00000000); + nv_icmd(priv, 0x0002bf, 0x00000000); + nv_icmd(priv, 0x000430, 0x00000000); + nv_icmd(priv, 0x000431, 0x00000000); + nv_icmd(priv, 0x000432, 0x00000000); + nv_icmd(priv, 0x000433, 0x00000000); + nv_icmd(priv, 
0x000434, 0x00000000); + nv_icmd(priv, 0x000435, 0x00000000); + nv_icmd(priv, 0x000436, 0x00000000); + nv_icmd(priv, 0x000437, 0x00000000); + nv_icmd(priv, 0x000438, 0x00000000); + nv_icmd(priv, 0x000439, 0x00000000); + nv_icmd(priv, 0x00043a, 0x00000000); + nv_icmd(priv, 0x00043b, 0x00000000); + nv_icmd(priv, 0x00043c, 0x00000000); + nv_icmd(priv, 0x00043d, 0x00000000); + nv_icmd(priv, 0x00043e, 0x00000000); + nv_icmd(priv, 0x00043f, 0x00000000); + nv_icmd(priv, 0x0002c0, 0x00000000); + nv_icmd(priv, 0x0002c1, 0x00000000); + nv_icmd(priv, 0x0002c2, 0x00000000); + nv_icmd(priv, 0x0002c3, 0x00000000); + nv_icmd(priv, 0x0002c4, 0x00000000); + nv_icmd(priv, 0x0002c5, 0x00000000); + nv_icmd(priv, 0x0002c6, 0x00000000); + nv_icmd(priv, 0x0002c7, 0x00000000); + nv_icmd(priv, 0x0002c8, 0x00000000); + nv_icmd(priv, 0x0002c9, 0x00000000); + nv_icmd(priv, 0x0002ca, 0x00000000); + nv_icmd(priv, 0x0002cb, 0x00000000); + nv_icmd(priv, 0x0002cc, 0x00000000); + nv_icmd(priv, 0x0002cd, 0x00000000); + nv_icmd(priv, 0x0002ce, 0x00000000); + nv_icmd(priv, 0x0002cf, 0x00000000); + nv_icmd(priv, 0x0004d0, 0x00000000); + nv_icmd(priv, 0x0004d1, 0x00000000); + nv_icmd(priv, 0x0004d2, 0x00000000); + nv_icmd(priv, 0x0004d3, 0x00000000); + nv_icmd(priv, 0x0004d4, 0x00000000); + nv_icmd(priv, 0x0004d5, 0x00000000); + nv_icmd(priv, 0x0004d6, 0x00000000); + nv_icmd(priv, 0x0004d7, 0x00000000); + nv_icmd(priv, 0x0004d8, 0x00000000); + nv_icmd(priv, 0x0004d9, 0x00000000); + nv_icmd(priv, 0x0004da, 0x00000000); + nv_icmd(priv, 0x0004db, 0x00000000); + nv_icmd(priv, 0x0004dc, 0x00000000); + nv_icmd(priv, 0x0004dd, 0x00000000); + nv_icmd(priv, 0x0004de, 0x00000000); + nv_icmd(priv, 0x0004df, 0x00000000); + nv_icmd(priv, 0x000720, 0x00000000); + nv_icmd(priv, 0x000721, 0x00000000); + nv_icmd(priv, 0x000722, 0x00000000); + nv_icmd(priv, 0x000723, 0x00000000); + nv_icmd(priv, 0x000724, 0x00000000); + nv_icmd(priv, 0x000725, 0x00000000); + nv_icmd(priv, 0x000726, 0x00000000); + nv_icmd(priv, 0x000727, 0x00000000); + nv_icmd(priv, 0x000728, 0x00000000); + nv_icmd(priv, 0x000729, 0x00000000); + nv_icmd(priv, 0x00072a, 0x00000000); + nv_icmd(priv, 0x00072b, 0x00000000); + nv_icmd(priv, 0x00072c, 0x00000000); + nv_icmd(priv, 0x00072d, 0x00000000); + nv_icmd(priv, 0x00072e, 0x00000000); + nv_icmd(priv, 0x00072f, 0x00000000); + nv_icmd(priv, 0x0008c0, 0x00000000); + nv_icmd(priv, 0x0008c1, 0x00000000); + nv_icmd(priv, 0x0008c2, 0x00000000); + nv_icmd(priv, 0x0008c3, 0x00000000); + nv_icmd(priv, 0x0008c4, 0x00000000); + nv_icmd(priv, 0x0008c5, 0x00000000); + nv_icmd(priv, 0x0008c6, 0x00000000); + nv_icmd(priv, 0x0008c7, 0x00000000); + nv_icmd(priv, 0x0008c8, 0x00000000); + nv_icmd(priv, 0x0008c9, 0x00000000); + nv_icmd(priv, 0x0008ca, 0x00000000); + nv_icmd(priv, 0x0008cb, 0x00000000); + nv_icmd(priv, 0x0008cc, 0x00000000); + nv_icmd(priv, 0x0008cd, 0x00000000); + nv_icmd(priv, 0x0008ce, 0x00000000); + nv_icmd(priv, 0x0008cf, 0x00000000); + nv_icmd(priv, 0x000890, 0x00000000); + nv_icmd(priv, 0x000891, 0x00000000); + nv_icmd(priv, 0x000892, 0x00000000); + nv_icmd(priv, 0x000893, 0x00000000); + nv_icmd(priv, 0x000894, 0x00000000); + nv_icmd(priv, 0x000895, 0x00000000); + nv_icmd(priv, 0x000896, 0x00000000); + nv_icmd(priv, 0x000897, 0x00000000); + nv_icmd(priv, 0x000898, 0x00000000); + nv_icmd(priv, 0x000899, 0x00000000); + nv_icmd(priv, 0x00089a, 0x00000000); + nv_icmd(priv, 0x00089b, 0x00000000); + nv_icmd(priv, 0x00089c, 0x00000000); + nv_icmd(priv, 0x00089d, 0x00000000); + nv_icmd(priv, 0x00089e, 0x00000000); + nv_icmd(priv, 
0x00089f, 0x00000000); + nv_icmd(priv, 0x0008e0, 0x00000000); + nv_icmd(priv, 0x0008e1, 0x00000000); + nv_icmd(priv, 0x0008e2, 0x00000000); + nv_icmd(priv, 0x0008e3, 0x00000000); + nv_icmd(priv, 0x0008e4, 0x00000000); + nv_icmd(priv, 0x0008e5, 0x00000000); + nv_icmd(priv, 0x0008e6, 0x00000000); + nv_icmd(priv, 0x0008e7, 0x00000000); + nv_icmd(priv, 0x0008e8, 0x00000000); + nv_icmd(priv, 0x0008e9, 0x00000000); + nv_icmd(priv, 0x0008ea, 0x00000000); + nv_icmd(priv, 0x0008eb, 0x00000000); + nv_icmd(priv, 0x0008ec, 0x00000000); + nv_icmd(priv, 0x0008ed, 0x00000000); + nv_icmd(priv, 0x0008ee, 0x00000000); + nv_icmd(priv, 0x0008ef, 0x00000000); + nv_icmd(priv, 0x0008a0, 0x00000000); + nv_icmd(priv, 0x0008a1, 0x00000000); + nv_icmd(priv, 0x0008a2, 0x00000000); + nv_icmd(priv, 0x0008a3, 0x00000000); + nv_icmd(priv, 0x0008a4, 0x00000000); + nv_icmd(priv, 0x0008a5, 0x00000000); + nv_icmd(priv, 0x0008a6, 0x00000000); + nv_icmd(priv, 0x0008a7, 0x00000000); + nv_icmd(priv, 0x0008a8, 0x00000000); + nv_icmd(priv, 0x0008a9, 0x00000000); + nv_icmd(priv, 0x0008aa, 0x00000000); + nv_icmd(priv, 0x0008ab, 0x00000000); + nv_icmd(priv, 0x0008ac, 0x00000000); + nv_icmd(priv, 0x0008ad, 0x00000000); + nv_icmd(priv, 0x0008ae, 0x00000000); + nv_icmd(priv, 0x0008af, 0x00000000); + nv_icmd(priv, 0x0008f0, 0x00000000); + nv_icmd(priv, 0x0008f1, 0x00000000); + nv_icmd(priv, 0x0008f2, 0x00000000); + nv_icmd(priv, 0x0008f3, 0x00000000); + nv_icmd(priv, 0x0008f4, 0x00000000); + nv_icmd(priv, 0x0008f5, 0x00000000); + nv_icmd(priv, 0x0008f6, 0x00000000); + nv_icmd(priv, 0x0008f7, 0x00000000); + nv_icmd(priv, 0x0008f8, 0x00000000); + nv_icmd(priv, 0x0008f9, 0x00000000); + nv_icmd(priv, 0x0008fa, 0x00000000); + nv_icmd(priv, 0x0008fb, 0x00000000); + nv_icmd(priv, 0x0008fc, 0x00000000); + nv_icmd(priv, 0x0008fd, 0x00000000); + nv_icmd(priv, 0x0008fe, 0x00000000); + nv_icmd(priv, 0x0008ff, 0x00000000); + nv_icmd(priv, 0x00094c, 0x000000ff); + nv_icmd(priv, 0x00094d, 0xffffffff); + nv_icmd(priv, 0x00094e, 0x00000002); + nv_icmd(priv, 0x0002ec, 0x00000001); + nv_icmd(priv, 0x000303, 0x00000001); + nv_icmd(priv, 0x0002e6, 0x00000001); + nv_icmd(priv, 0x000466, 0x00000052); + nv_icmd(priv, 0x000301, 0x3f800000); + nv_icmd(priv, 0x000304, 0x30201000); + nv_icmd(priv, 0x000305, 0x70605040); + nv_icmd(priv, 0x000306, 0xb8a89888); + nv_icmd(priv, 0x000307, 0xf8e8d8c8); + nv_icmd(priv, 0x00030a, 0x00ffff00); + nv_icmd(priv, 0x00030b, 0x0000001a); + nv_icmd(priv, 0x00030c, 0x00000001); + nv_icmd(priv, 0x000318, 0x00000001); + nv_icmd(priv, 0x000340, 0x00000000); + nv_icmd(priv, 0x000375, 0x00000001); + nv_icmd(priv, 0x00037d, 0x00000006); + nv_icmd(priv, 0x0003a0, 0x00000002); + nv_icmd(priv, 0x0003aa, 0x00000001); + nv_icmd(priv, 0x0003a9, 0x00000001); + nv_icmd(priv, 0x000380, 0x00000001); + nv_icmd(priv, 0x000383, 0x00000011); + nv_icmd(priv, 0x000360, 0x00000040); + nv_icmd(priv, 0x000366, 0x00000000); + nv_icmd(priv, 0x000367, 0x00000000); + nv_icmd(priv, 0x000368, 0x00000fff); + nv_icmd(priv, 0x000370, 0x00000000); + nv_icmd(priv, 0x000371, 0x00000000); + nv_icmd(priv, 0x000372, 0x000fffff); + nv_icmd(priv, 0x00037a, 0x00000012); + nv_icmd(priv, 0x000619, 0x00000003); + nv_icmd(priv, 0x000811, 0x00000003); + nv_icmd(priv, 0x000812, 0x00000004); + nv_icmd(priv, 0x000813, 0x00000006); + nv_icmd(priv, 0x000814, 0x00000008); + nv_icmd(priv, 0x000815, 0x0000000b); + nv_icmd(priv, 0x000800, 0x00000001); + nv_icmd(priv, 0x000801, 0x00000001); + nv_icmd(priv, 0x000802, 0x00000001); + nv_icmd(priv, 0x000803, 0x00000001); + nv_icmd(priv, 
0x000804, 0x00000001); + nv_icmd(priv, 0x000805, 0x00000001); + nv_icmd(priv, 0x000632, 0x00000001); + nv_icmd(priv, 0x000633, 0x00000002); + nv_icmd(priv, 0x000634, 0x00000003); + nv_icmd(priv, 0x000635, 0x00000004); + nv_icmd(priv, 0x000654, 0x3f800000); + nv_icmd(priv, 0x000657, 0x3f800000); + nv_icmd(priv, 0x000655, 0x3f800000); + nv_icmd(priv, 0x000656, 0x3f800000); + nv_icmd(priv, 0x0006cd, 0x3f800000); + nv_icmd(priv, 0x0007f5, 0x3f800000); + nv_icmd(priv, 0x0007dc, 0x39291909); + nv_icmd(priv, 0x0007dd, 0x79695949); + nv_icmd(priv, 0x0007de, 0xb9a99989); + nv_icmd(priv, 0x0007df, 0xf9e9d9c9); + nv_icmd(priv, 0x0007e8, 0x00003210); + nv_icmd(priv, 0x0007e9, 0x00007654); + nv_icmd(priv, 0x0007ea, 0x00000098); + nv_icmd(priv, 0x0007ec, 0x39291909); + nv_icmd(priv, 0x0007ed, 0x79695949); + nv_icmd(priv, 0x0007ee, 0xb9a99989); + nv_icmd(priv, 0x0007ef, 0xf9e9d9c9); + nv_icmd(priv, 0x0007f0, 0x00003210); + nv_icmd(priv, 0x0007f1, 0x00007654); + nv_icmd(priv, 0x0007f2, 0x00000098); + nv_icmd(priv, 0x0005a5, 0x00000001); + nv_icmd(priv, 0x000980, 0x00000000); + nv_icmd(priv, 0x000981, 0x00000000); + nv_icmd(priv, 0x000982, 0x00000000); + nv_icmd(priv, 0x000983, 0x00000000); + nv_icmd(priv, 0x000984, 0x00000000); + nv_icmd(priv, 0x000985, 0x00000000); + nv_icmd(priv, 0x000986, 0x00000000); + nv_icmd(priv, 0x000987, 0x00000000); + nv_icmd(priv, 0x000988, 0x00000000); + nv_icmd(priv, 0x000989, 0x00000000); + nv_icmd(priv, 0x00098a, 0x00000000); + nv_icmd(priv, 0x00098b, 0x00000000); + nv_icmd(priv, 0x00098c, 0x00000000); + nv_icmd(priv, 0x00098d, 0x00000000); + nv_icmd(priv, 0x00098e, 0x00000000); + nv_icmd(priv, 0x00098f, 0x00000000); + nv_icmd(priv, 0x000990, 0x00000000); + nv_icmd(priv, 0x000991, 0x00000000); + nv_icmd(priv, 0x000992, 0x00000000); + nv_icmd(priv, 0x000993, 0x00000000); + nv_icmd(priv, 0x000994, 0x00000000); + nv_icmd(priv, 0x000995, 0x00000000); + nv_icmd(priv, 0x000996, 0x00000000); + nv_icmd(priv, 0x000997, 0x00000000); + nv_icmd(priv, 0x000998, 0x00000000); + nv_icmd(priv, 0x000999, 0x00000000); + nv_icmd(priv, 0x00099a, 0x00000000); + nv_icmd(priv, 0x00099b, 0x00000000); + nv_icmd(priv, 0x00099c, 0x00000000); + nv_icmd(priv, 0x00099d, 0x00000000); + nv_icmd(priv, 0x00099e, 0x00000000); + nv_icmd(priv, 0x00099f, 0x00000000); + nv_icmd(priv, 0x0009a0, 0x00000000); + nv_icmd(priv, 0x0009a1, 0x00000000); + nv_icmd(priv, 0x0009a2, 0x00000000); + nv_icmd(priv, 0x0009a3, 0x00000000); + nv_icmd(priv, 0x0009a4, 0x00000000); + nv_icmd(priv, 0x0009a5, 0x00000000); + nv_icmd(priv, 0x0009a6, 0x00000000); + nv_icmd(priv, 0x0009a7, 0x00000000); + nv_icmd(priv, 0x0009a8, 0x00000000); + nv_icmd(priv, 0x0009a9, 0x00000000); + nv_icmd(priv, 0x0009aa, 0x00000000); + nv_icmd(priv, 0x0009ab, 0x00000000); + nv_icmd(priv, 0x0009ac, 0x00000000); + nv_icmd(priv, 0x0009ad, 0x00000000); + nv_icmd(priv, 0x0009ae, 0x00000000); + nv_icmd(priv, 0x0009af, 0x00000000); + nv_icmd(priv, 0x0009b0, 0x00000000); + nv_icmd(priv, 0x0009b1, 0x00000000); + nv_icmd(priv, 0x0009b2, 0x00000000); + nv_icmd(priv, 0x0009b3, 0x00000000); + nv_icmd(priv, 0x0009b4, 0x00000000); + nv_icmd(priv, 0x0009b5, 0x00000000); + nv_icmd(priv, 0x0009b6, 0x00000000); + nv_icmd(priv, 0x0009b7, 0x00000000); + nv_icmd(priv, 0x0009b8, 0x00000000); + nv_icmd(priv, 0x0009b9, 0x00000000); + nv_icmd(priv, 0x0009ba, 0x00000000); + nv_icmd(priv, 0x0009bb, 0x00000000); + nv_icmd(priv, 0x0009bc, 0x00000000); + nv_icmd(priv, 0x0009bd, 0x00000000); + nv_icmd(priv, 0x0009be, 0x00000000); + nv_icmd(priv, 0x0009bf, 0x00000000); + nv_icmd(priv, 
0x0009c0, 0x00000000); + nv_icmd(priv, 0x0009c1, 0x00000000); + nv_icmd(priv, 0x0009c2, 0x00000000); + nv_icmd(priv, 0x0009c3, 0x00000000); + nv_icmd(priv, 0x0009c4, 0x00000000); + nv_icmd(priv, 0x0009c5, 0x00000000); + nv_icmd(priv, 0x0009c6, 0x00000000); + nv_icmd(priv, 0x0009c7, 0x00000000); + nv_icmd(priv, 0x0009c8, 0x00000000); + nv_icmd(priv, 0x0009c9, 0x00000000); + nv_icmd(priv, 0x0009ca, 0x00000000); + nv_icmd(priv, 0x0009cb, 0x00000000); + nv_icmd(priv, 0x0009cc, 0x00000000); + nv_icmd(priv, 0x0009cd, 0x00000000); + nv_icmd(priv, 0x0009ce, 0x00000000); + nv_icmd(priv, 0x0009cf, 0x00000000); + nv_icmd(priv, 0x0009d0, 0x00000000); + nv_icmd(priv, 0x0009d1, 0x00000000); + nv_icmd(priv, 0x0009d2, 0x00000000); + nv_icmd(priv, 0x0009d3, 0x00000000); + nv_icmd(priv, 0x0009d4, 0x00000000); + nv_icmd(priv, 0x0009d5, 0x00000000); + nv_icmd(priv, 0x0009d6, 0x00000000); + nv_icmd(priv, 0x0009d7, 0x00000000); + nv_icmd(priv, 0x0009d8, 0x00000000); + nv_icmd(priv, 0x0009d9, 0x00000000); + nv_icmd(priv, 0x0009da, 0x00000000); + nv_icmd(priv, 0x0009db, 0x00000000); + nv_icmd(priv, 0x0009dc, 0x00000000); + nv_icmd(priv, 0x0009dd, 0x00000000); + nv_icmd(priv, 0x0009de, 0x00000000); + nv_icmd(priv, 0x0009df, 0x00000000); + nv_icmd(priv, 0x0009e0, 0x00000000); + nv_icmd(priv, 0x0009e1, 0x00000000); + nv_icmd(priv, 0x0009e2, 0x00000000); + nv_icmd(priv, 0x0009e3, 0x00000000); + nv_icmd(priv, 0x0009e4, 0x00000000); + nv_icmd(priv, 0x0009e5, 0x00000000); + nv_icmd(priv, 0x0009e6, 0x00000000); + nv_icmd(priv, 0x0009e7, 0x00000000); + nv_icmd(priv, 0x0009e8, 0x00000000); + nv_icmd(priv, 0x0009e9, 0x00000000); + nv_icmd(priv, 0x0009ea, 0x00000000); + nv_icmd(priv, 0x0009eb, 0x00000000); + nv_icmd(priv, 0x0009ec, 0x00000000); + nv_icmd(priv, 0x0009ed, 0x00000000); + nv_icmd(priv, 0x0009ee, 0x00000000); + nv_icmd(priv, 0x0009ef, 0x00000000); + nv_icmd(priv, 0x0009f0, 0x00000000); + nv_icmd(priv, 0x0009f1, 0x00000000); + nv_icmd(priv, 0x0009f2, 0x00000000); + nv_icmd(priv, 0x0009f3, 0x00000000); + nv_icmd(priv, 0x0009f4, 0x00000000); + nv_icmd(priv, 0x0009f5, 0x00000000); + nv_icmd(priv, 0x0009f6, 0x00000000); + nv_icmd(priv, 0x0009f7, 0x00000000); + nv_icmd(priv, 0x0009f8, 0x00000000); + nv_icmd(priv, 0x0009f9, 0x00000000); + nv_icmd(priv, 0x0009fa, 0x00000000); + nv_icmd(priv, 0x0009fb, 0x00000000); + nv_icmd(priv, 0x0009fc, 0x00000000); + nv_icmd(priv, 0x0009fd, 0x00000000); + nv_icmd(priv, 0x0009fe, 0x00000000); + nv_icmd(priv, 0x0009ff, 0x00000000); + nv_icmd(priv, 0x000468, 0x00000004); + nv_icmd(priv, 0x00046c, 0x00000001); + nv_icmd(priv, 0x000470, 0x00000000); + nv_icmd(priv, 0x000471, 0x00000000); + nv_icmd(priv, 0x000472, 0x00000000); + nv_icmd(priv, 0x000473, 0x00000000); + nv_icmd(priv, 0x000474, 0x00000000); + nv_icmd(priv, 0x000475, 0x00000000); + nv_icmd(priv, 0x000476, 0x00000000); + nv_icmd(priv, 0x000477, 0x00000000); + nv_icmd(priv, 0x000478, 0x00000000); + nv_icmd(priv, 0x000479, 0x00000000); + nv_icmd(priv, 0x00047a, 0x00000000); + nv_icmd(priv, 0x00047b, 0x00000000); + nv_icmd(priv, 0x00047c, 0x00000000); + nv_icmd(priv, 0x00047d, 0x00000000); + nv_icmd(priv, 0x00047e, 0x00000000); + nv_icmd(priv, 0x00047f, 0x00000000); + nv_icmd(priv, 0x000480, 0x00000000); + nv_icmd(priv, 0x000481, 0x00000000); + nv_icmd(priv, 0x000482, 0x00000000); + nv_icmd(priv, 0x000483, 0x00000000); + nv_icmd(priv, 0x000484, 0x00000000); + nv_icmd(priv, 0x000485, 0x00000000); + nv_icmd(priv, 0x000486, 0x00000000); + nv_icmd(priv, 0x000487, 0x00000000); + nv_icmd(priv, 0x000488, 0x00000000); + nv_icmd(priv, 
0x000489, 0x00000000); + nv_icmd(priv, 0x00048a, 0x00000000); + nv_icmd(priv, 0x00048b, 0x00000000); + nv_icmd(priv, 0x00048c, 0x00000000); + nv_icmd(priv, 0x00048d, 0x00000000); + nv_icmd(priv, 0x00048e, 0x00000000); + nv_icmd(priv, 0x00048f, 0x00000000); + nv_icmd(priv, 0x000490, 0x00000000); + nv_icmd(priv, 0x000491, 0x00000000); + nv_icmd(priv, 0x000492, 0x00000000); + nv_icmd(priv, 0x000493, 0x00000000); + nv_icmd(priv, 0x000494, 0x00000000); + nv_icmd(priv, 0x000495, 0x00000000); + nv_icmd(priv, 0x000496, 0x00000000); + nv_icmd(priv, 0x000497, 0x00000000); + nv_icmd(priv, 0x000498, 0x00000000); + nv_icmd(priv, 0x000499, 0x00000000); + nv_icmd(priv, 0x00049a, 0x00000000); + nv_icmd(priv, 0x00049b, 0x00000000); + nv_icmd(priv, 0x00049c, 0x00000000); + nv_icmd(priv, 0x00049d, 0x00000000); + nv_icmd(priv, 0x00049e, 0x00000000); + nv_icmd(priv, 0x00049f, 0x00000000); + nv_icmd(priv, 0x0004a0, 0x00000000); + nv_icmd(priv, 0x0004a1, 0x00000000); + nv_icmd(priv, 0x0004a2, 0x00000000); + nv_icmd(priv, 0x0004a3, 0x00000000); + nv_icmd(priv, 0x0004a4, 0x00000000); + nv_icmd(priv, 0x0004a5, 0x00000000); + nv_icmd(priv, 0x0004a6, 0x00000000); + nv_icmd(priv, 0x0004a7, 0x00000000); + nv_icmd(priv, 0x0004a8, 0x00000000); + nv_icmd(priv, 0x0004a9, 0x00000000); + nv_icmd(priv, 0x0004aa, 0x00000000); + nv_icmd(priv, 0x0004ab, 0x00000000); + nv_icmd(priv, 0x0004ac, 0x00000000); + nv_icmd(priv, 0x0004ad, 0x00000000); + nv_icmd(priv, 0x0004ae, 0x00000000); + nv_icmd(priv, 0x0004af, 0x00000000); + nv_icmd(priv, 0x0004b0, 0x00000000); + nv_icmd(priv, 0x0004b1, 0x00000000); + nv_icmd(priv, 0x0004b2, 0x00000000); + nv_icmd(priv, 0x0004b3, 0x00000000); + nv_icmd(priv, 0x0004b4, 0x00000000); + nv_icmd(priv, 0x0004b5, 0x00000000); + nv_icmd(priv, 0x0004b6, 0x00000000); + nv_icmd(priv, 0x0004b7, 0x00000000); + nv_icmd(priv, 0x0004b8, 0x00000000); + nv_icmd(priv, 0x0004b9, 0x00000000); + nv_icmd(priv, 0x0004ba, 0x00000000); + nv_icmd(priv, 0x0004bb, 0x00000000); + nv_icmd(priv, 0x0004bc, 0x00000000); + nv_icmd(priv, 0x0004bd, 0x00000000); + nv_icmd(priv, 0x0004be, 0x00000000); + nv_icmd(priv, 0x0004bf, 0x00000000); + nv_icmd(priv, 0x0004c0, 0x00000000); + nv_icmd(priv, 0x0004c1, 0x00000000); + nv_icmd(priv, 0x0004c2, 0x00000000); + nv_icmd(priv, 0x0004c3, 0x00000000); + nv_icmd(priv, 0x0004c4, 0x00000000); + nv_icmd(priv, 0x0004c5, 0x00000000); + nv_icmd(priv, 0x0004c6, 0x00000000); + nv_icmd(priv, 0x0004c7, 0x00000000); + nv_icmd(priv, 0x0004c8, 0x00000000); + nv_icmd(priv, 0x0004c9, 0x00000000); + nv_icmd(priv, 0x0004ca, 0x00000000); + nv_icmd(priv, 0x0004cb, 0x00000000); + nv_icmd(priv, 0x0004cc, 0x00000000); + nv_icmd(priv, 0x0004cd, 0x00000000); + nv_icmd(priv, 0x0004ce, 0x00000000); + nv_icmd(priv, 0x0004cf, 0x00000000); + nv_icmd(priv, 0x000510, 0x3f800000); + nv_icmd(priv, 0x000511, 0x3f800000); + nv_icmd(priv, 0x000512, 0x3f800000); + nv_icmd(priv, 0x000513, 0x3f800000); + nv_icmd(priv, 0x000514, 0x3f800000); + nv_icmd(priv, 0x000515, 0x3f800000); + nv_icmd(priv, 0x000516, 0x3f800000); + nv_icmd(priv, 0x000517, 0x3f800000); + nv_icmd(priv, 0x000518, 0x3f800000); + nv_icmd(priv, 0x000519, 0x3f800000); + nv_icmd(priv, 0x00051a, 0x3f800000); + nv_icmd(priv, 0x00051b, 0x3f800000); + nv_icmd(priv, 0x00051c, 0x3f800000); + nv_icmd(priv, 0x00051d, 0x3f800000); + nv_icmd(priv, 0x00051e, 0x3f800000); + nv_icmd(priv, 0x00051f, 0x3f800000); + nv_icmd(priv, 0x000520, 0x000002b6); + nv_icmd(priv, 0x000529, 0x00000001); + nv_icmd(priv, 0x000530, 0xffff0000); + nv_icmd(priv, 0x000531, 0xffff0000); + nv_icmd(priv, 
0x000532, 0xffff0000); + nv_icmd(priv, 0x000533, 0xffff0000); + nv_icmd(priv, 0x000534, 0xffff0000); + nv_icmd(priv, 0x000535, 0xffff0000); + nv_icmd(priv, 0x000536, 0xffff0000); + nv_icmd(priv, 0x000537, 0xffff0000); + nv_icmd(priv, 0x000538, 0xffff0000); + nv_icmd(priv, 0x000539, 0xffff0000); + nv_icmd(priv, 0x00053a, 0xffff0000); + nv_icmd(priv, 0x00053b, 0xffff0000); + nv_icmd(priv, 0x00053c, 0xffff0000); + nv_icmd(priv, 0x00053d, 0xffff0000); + nv_icmd(priv, 0x00053e, 0xffff0000); + nv_icmd(priv, 0x00053f, 0xffff0000); + nv_icmd(priv, 0x000585, 0x0000003f); + nv_icmd(priv, 0x000576, 0x00000003); + nv_icmd(priv, 0x00057b, 0x00000059); + nv_icmd(priv, 0x000586, 0x00000040); + nv_icmd(priv, 0x000582, 0x00000080); + nv_icmd(priv, 0x000583, 0x00000080); + nv_icmd(priv, 0x0005c2, 0x00000001); + nv_icmd(priv, 0x000638, 0x00000001); + nv_icmd(priv, 0x000639, 0x00000001); + nv_icmd(priv, 0x00063a, 0x00000002); + nv_icmd(priv, 0x00063b, 0x00000001); + nv_icmd(priv, 0x00063c, 0x00000001); + nv_icmd(priv, 0x00063d, 0x00000002); + nv_icmd(priv, 0x00063e, 0x00000001); + nv_icmd(priv, 0x0008b8, 0x00000001); + nv_icmd(priv, 0x0008b9, 0x00000001); + nv_icmd(priv, 0x0008ba, 0x00000001); + nv_icmd(priv, 0x0008bb, 0x00000001); + nv_icmd(priv, 0x0008bc, 0x00000001); + nv_icmd(priv, 0x0008bd, 0x00000001); + nv_icmd(priv, 0x0008be, 0x00000001); + nv_icmd(priv, 0x0008bf, 0x00000001); + nv_icmd(priv, 0x000900, 0x00000001); + nv_icmd(priv, 0x000901, 0x00000001); + nv_icmd(priv, 0x000902, 0x00000001); + nv_icmd(priv, 0x000903, 0x00000001); + nv_icmd(priv, 0x000904, 0x00000001); + nv_icmd(priv, 0x000905, 0x00000001); + nv_icmd(priv, 0x000906, 0x00000001); + nv_icmd(priv, 0x000907, 0x00000001); + nv_icmd(priv, 0x000908, 0x00000002); + nv_icmd(priv, 0x000909, 0x00000002); + nv_icmd(priv, 0x00090a, 0x00000002); + nv_icmd(priv, 0x00090b, 0x00000002); + nv_icmd(priv, 0x00090c, 0x00000002); + nv_icmd(priv, 0x00090d, 0x00000002); + nv_icmd(priv, 0x00090e, 0x00000002); + nv_icmd(priv, 0x00090f, 0x00000002); + nv_icmd(priv, 0x000910, 0x00000001); + nv_icmd(priv, 0x000911, 0x00000001); + nv_icmd(priv, 0x000912, 0x00000001); + nv_icmd(priv, 0x000913, 0x00000001); + nv_icmd(priv, 0x000914, 0x00000001); + nv_icmd(priv, 0x000915, 0x00000001); + nv_icmd(priv, 0x000916, 0x00000001); + nv_icmd(priv, 0x000917, 0x00000001); + nv_icmd(priv, 0x000918, 0x00000001); + nv_icmd(priv, 0x000919, 0x00000001); + nv_icmd(priv, 0x00091a, 0x00000001); + nv_icmd(priv, 0x00091b, 0x00000001); + nv_icmd(priv, 0x00091c, 0x00000001); + nv_icmd(priv, 0x00091d, 0x00000001); + nv_icmd(priv, 0x00091e, 0x00000001); + nv_icmd(priv, 0x00091f, 0x00000001); + nv_icmd(priv, 0x000920, 0x00000002); + nv_icmd(priv, 0x000921, 0x00000002); + nv_icmd(priv, 0x000922, 0x00000002); + nv_icmd(priv, 0x000923, 0x00000002); + nv_icmd(priv, 0x000924, 0x00000002); + nv_icmd(priv, 0x000925, 0x00000002); + nv_icmd(priv, 0x000926, 0x00000002); + nv_icmd(priv, 0x000927, 0x00000002); + nv_icmd(priv, 0x000928, 0x00000001); + nv_icmd(priv, 0x000929, 0x00000001); + nv_icmd(priv, 0x00092a, 0x00000001); + nv_icmd(priv, 0x00092b, 0x00000001); + nv_icmd(priv, 0x00092c, 0x00000001); + nv_icmd(priv, 0x00092d, 0x00000001); + nv_icmd(priv, 0x00092e, 0x00000001); + nv_icmd(priv, 0x00092f, 0x00000001); + nv_icmd(priv, 0x000648, 0x00000001); + nv_icmd(priv, 0x000649, 0x00000001); + nv_icmd(priv, 0x00064a, 0x00000001); + nv_icmd(priv, 0x00064b, 0x00000001); + nv_icmd(priv, 0x00064c, 0x00000001); + nv_icmd(priv, 0x00064d, 0x00000001); + nv_icmd(priv, 0x00064e, 0x00000001); + nv_icmd(priv, 
0x00064f, 0x00000001); + nv_icmd(priv, 0x000650, 0x00000001); + nv_icmd(priv, 0x000658, 0x0000000f); + nv_icmd(priv, 0x0007ff, 0x0000000a); + nv_icmd(priv, 0x00066a, 0x40000000); + nv_icmd(priv, 0x00066b, 0x10000000); + nv_icmd(priv, 0x00066c, 0xffff0000); + nv_icmd(priv, 0x00066d, 0xffff0000); + nv_icmd(priv, 0x0007af, 0x00000008); + nv_icmd(priv, 0x0007b0, 0x00000008); + nv_icmd(priv, 0x0007f6, 0x00000001); + nv_icmd(priv, 0x0006b2, 0x00000055); + nv_icmd(priv, 0x0007ad, 0x00000003); + nv_icmd(priv, 0x000937, 0x00000001); + nv_icmd(priv, 0x000971, 0x00000008); + nv_icmd(priv, 0x000972, 0x00000040); + nv_icmd(priv, 0x000973, 0x0000012c); + nv_icmd(priv, 0x00097c, 0x00000040); + nv_icmd(priv, 0x000979, 0x00000003); + nv_icmd(priv, 0x000975, 0x00000020); + nv_icmd(priv, 0x000976, 0x00000001); + nv_icmd(priv, 0x000977, 0x00000020); + nv_icmd(priv, 0x000978, 0x00000001); + nv_icmd(priv, 0x000957, 0x00000003); + nv_icmd(priv, 0x00095e, 0x20164010); + nv_icmd(priv, 0x00095f, 0x00000020); + nv_icmd(priv, 0x00097d, 0x00000020); + nv_icmd(priv, 0x000683, 0x00000006); + nv_icmd(priv, 0x000685, 0x003fffff); + nv_icmd(priv, 0x000687, 0x003fffff); + nv_icmd(priv, 0x0006a0, 0x00000005); + nv_icmd(priv, 0x000840, 0x00400008); + nv_icmd(priv, 0x000841, 0x08000080); + nv_icmd(priv, 0x000842, 0x00400008); + nv_icmd(priv, 0x000843, 0x08000080); + nv_icmd(priv, 0x000818, 0x00000000); + nv_icmd(priv, 0x000819, 0x00000000); + nv_icmd(priv, 0x00081a, 0x00000000); + nv_icmd(priv, 0x00081b, 0x00000000); + nv_icmd(priv, 0x00081c, 0x00000000); + nv_icmd(priv, 0x00081d, 0x00000000); + nv_icmd(priv, 0x00081e, 0x00000000); + nv_icmd(priv, 0x00081f, 0x00000000); + nv_icmd(priv, 0x000848, 0x00000000); + nv_icmd(priv, 0x000849, 0x00000000); + nv_icmd(priv, 0x00084a, 0x00000000); + nv_icmd(priv, 0x00084b, 0x00000000); + nv_icmd(priv, 0x00084c, 0x00000000); + nv_icmd(priv, 0x00084d, 0x00000000); + nv_icmd(priv, 0x00084e, 0x00000000); + nv_icmd(priv, 0x00084f, 0x00000000); + nv_icmd(priv, 0x000850, 0x00000000); + nv_icmd(priv, 0x000851, 0x00000000); + nv_icmd(priv, 0x000852, 0x00000000); + nv_icmd(priv, 0x000853, 0x00000000); + nv_icmd(priv, 0x000854, 0x00000000); + nv_icmd(priv, 0x000855, 0x00000000); + nv_icmd(priv, 0x000856, 0x00000000); + nv_icmd(priv, 0x000857, 0x00000000); + nv_icmd(priv, 0x000738, 0x00000000); + nv_icmd(priv, 0x0006aa, 0x00000001); + nv_icmd(priv, 0x0006ab, 0x00000002); + nv_icmd(priv, 0x0006ac, 0x00000080); + nv_icmd(priv, 0x0006ad, 0x00000100); + nv_icmd(priv, 0x0006ae, 0x00000100); + nv_icmd(priv, 0x0006b1, 0x00000011); + nv_icmd(priv, 0x0006bb, 0x000000cf); + nv_icmd(priv, 0x0006ce, 0x2a712488); + nv_icmd(priv, 0x000739, 0x4085c000); + nv_icmd(priv, 0x00073a, 0x00000080); + nv_icmd(priv, 0x000786, 0x80000100); + nv_icmd(priv, 0x00073c, 0x00010100); + nv_icmd(priv, 0x00073d, 0x02800000); + nv_icmd(priv, 0x000787, 0x000000cf); + nv_icmd(priv, 0x00078c, 0x00000008); + nv_icmd(priv, 0x000792, 0x00000001); + nv_icmd(priv, 0x000794, 0x00000001); + nv_icmd(priv, 0x000795, 0x00000001); + nv_icmd(priv, 0x000796, 0x00000001); + nv_icmd(priv, 0x000797, 0x000000cf); + nv_icmd(priv, 0x000836, 0x00000001); + nv_icmd(priv, 0x00079a, 0x00000002); + nv_icmd(priv, 0x000833, 0x04444480); + nv_icmd(priv, 0x0007a1, 0x00000001); + nv_icmd(priv, 0x0007a3, 0x00000001); + nv_icmd(priv, 0x0007a4, 0x00000001); + nv_icmd(priv, 0x0007a5, 0x00000001); + nv_icmd(priv, 0x000831, 0x00000004); + nv_icmd(priv, 0x000b07, 0x00000002); + nv_icmd(priv, 0x000b08, 0x00000100); + nv_icmd(priv, 0x000b09, 0x00000100); + nv_icmd(priv, 
0x000b0a, 0x00000001); + nv_icmd(priv, 0x000a04, 0x000000ff); + nv_icmd(priv, 0x000a0b, 0x00000040); + nv_icmd(priv, 0x00097f, 0x00000100); + nv_icmd(priv, 0x000a02, 0x00000001); + nv_icmd(priv, 0x000809, 0x00000007); + nv_icmd(priv, 0x00c221, 0x00000040); + nv_icmd(priv, 0x00c1b0, 0x0000000f); + nv_icmd(priv, 0x00c1b1, 0x0000000f); + nv_icmd(priv, 0x00c1b2, 0x0000000f); + nv_icmd(priv, 0x00c1b3, 0x0000000f); + nv_icmd(priv, 0x00c1b4, 0x0000000f); + nv_icmd(priv, 0x00c1b5, 0x0000000f); + nv_icmd(priv, 0x00c1b6, 0x0000000f); + nv_icmd(priv, 0x00c1b7, 0x0000000f); + nv_icmd(priv, 0x00c1b8, 0x0fac6881); + nv_icmd(priv, 0x00c1b9, 0x00fac688); + nv_icmd(priv, 0x00c401, 0x00000001); + nv_icmd(priv, 0x00c402, 0x00010001); + nv_icmd(priv, 0x00c403, 0x00000001); + nv_icmd(priv, 0x00c404, 0x00000001); + nv_icmd(priv, 0x00c40e, 0x00000020); + nv_icmd(priv, 0x00c500, 0x00000003); + nv_icmd(priv, 0x01e100, 0x00000001); + nv_icmd(priv, 0x001000, 0x00000002); + nv_icmd(priv, 0x0006aa, 0x00000001); + nv_icmd(priv, 0x0006ad, 0x00000100); + nv_icmd(priv, 0x0006ae, 0x00000100); + nv_icmd(priv, 0x0006b1, 0x00000011); + nv_icmd(priv, 0x00078c, 0x00000008); + nv_icmd(priv, 0x000792, 0x00000001); + nv_icmd(priv, 0x000794, 0x00000001); + nv_icmd(priv, 0x000795, 0x00000001); + nv_icmd(priv, 0x000796, 0x00000001); + nv_icmd(priv, 0x000797, 0x000000cf); + nv_icmd(priv, 0x00079a, 0x00000002); + nv_icmd(priv, 0x000833, 0x04444480); + nv_icmd(priv, 0x0007a1, 0x00000001); + nv_icmd(priv, 0x0007a3, 0x00000001); + nv_icmd(priv, 0x0007a4, 0x00000001); + nv_icmd(priv, 0x0007a5, 0x00000001); + nv_icmd(priv, 0x000831, 0x00000004); + nv_icmd(priv, 0x01e100, 0x00000001); + nv_icmd(priv, 0x001000, 0x00000008); + nv_icmd(priv, 0x000039, 0x00000000); + nv_icmd(priv, 0x00003a, 0x00000000); + nv_icmd(priv, 0x00003b, 0x00000000); + nv_icmd(priv, 0x000380, 0x00000001); + nv_icmd(priv, 0x000366, 0x00000000); + nv_icmd(priv, 0x000367, 0x00000000); + nv_icmd(priv, 0x000368, 0x00000fff); + nv_icmd(priv, 0x000370, 0x00000000); + nv_icmd(priv, 0x000371, 0x00000000); + nv_icmd(priv, 0x000372, 0x000fffff); + nv_icmd(priv, 0x000813, 0x00000006); + nv_icmd(priv, 0x000814, 0x00000008); + nv_icmd(priv, 0x000957, 0x00000003); + nv_icmd(priv, 0x000818, 0x00000000); + nv_icmd(priv, 0x000819, 0x00000000); + nv_icmd(priv, 0x00081a, 0x00000000); + nv_icmd(priv, 0x00081b, 0x00000000); + nv_icmd(priv, 0x00081c, 0x00000000); + nv_icmd(priv, 0x00081d, 0x00000000); + nv_icmd(priv, 0x00081e, 0x00000000); + nv_icmd(priv, 0x00081f, 0x00000000); + nv_icmd(priv, 0x000848, 0x00000000); + nv_icmd(priv, 0x000849, 0x00000000); + nv_icmd(priv, 0x00084a, 0x00000000); + nv_icmd(priv, 0x00084b, 0x00000000); + nv_icmd(priv, 0x00084c, 0x00000000); + nv_icmd(priv, 0x00084d, 0x00000000); + nv_icmd(priv, 0x00084e, 0x00000000); + nv_icmd(priv, 0x00084f, 0x00000000); + nv_icmd(priv, 0x000850, 0x00000000); + nv_icmd(priv, 0x000851, 0x00000000); + nv_icmd(priv, 0x000852, 0x00000000); + nv_icmd(priv, 0x000853, 0x00000000); + nv_icmd(priv, 0x000854, 0x00000000); + nv_icmd(priv, 0x000855, 0x00000000); + nv_icmd(priv, 0x000856, 0x00000000); + nv_icmd(priv, 0x000857, 0x00000000); + nv_icmd(priv, 0x000738, 0x00000000); + nv_icmd(priv, 0x000b07, 0x00000002); + nv_icmd(priv, 0x000b08, 0x00000100); + nv_icmd(priv, 0x000b09, 0x00000100); + nv_icmd(priv, 0x000b0a, 0x00000001); + nv_icmd(priv, 0x000a04, 0x000000ff); + nv_icmd(priv, 0x00097f, 0x00000100); + nv_icmd(priv, 0x000a02, 0x00000001); + nv_icmd(priv, 0x000809, 0x00000007); + nv_icmd(priv, 0x00c221, 0x00000040); + nv_icmd(priv, 
0x00c401, 0x00000001);
+	nv_icmd(priv, 0x00c402, 0x00010001);
+	nv_icmd(priv, 0x00c403, 0x00000001);
+	nv_icmd(priv, 0x00c404, 0x00000001);
+	nv_icmd(priv, 0x00c40e, 0x00000020);
+	nv_icmd(priv, 0x00c500, 0x00000003);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000001);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_wr32(priv, 0x400208, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
+	nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
+	nv_mthd(priv,
0xa097, 0x0860, 0x00000000); + nv_mthd(priv, 0xa097, 0x08a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x08e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0920, 0x00000000); + nv_mthd(priv, 0xa097, 0x0960, 0x00000000); + nv_mthd(priv, 0xa097, 0x09a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x09e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c00, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c10, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c20, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c30, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c40, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c50, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c60, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c70, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c80, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c90, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c04, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c14, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c24, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c34, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c44, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c54, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c64, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c74, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c84, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c94, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c08, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c18, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c28, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c38, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c48, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c58, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c68, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c78, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c88, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c98, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cac, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cec, 0x00000000); + nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d00, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d10, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d20, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d30, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d40, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d50, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d60, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d70, 0x00000000); + nv_mthd(priv, 
0xa097, 0x1d80, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d90, 0x00000000); + nv_mthd(priv, 0xa097, 0x1da0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1db0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1de0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1df0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d04, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d14, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d24, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d34, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d44, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d54, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d64, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d74, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d84, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d94, 0x00000000); + nv_mthd(priv, 0xa097, 0x1da4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1db4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1de4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1df4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d08, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d18, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d28, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d38, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d48, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d58, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d68, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d78, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d88, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d98, 0x00000000); + nv_mthd(priv, 0xa097, 0x1da8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1db8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1de8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1df8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dac, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dec, 0x00000000); + nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f00, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f08, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f10, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f18, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f20, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f28, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f30, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f38, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f40, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f48, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f50, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f58, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f60, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f68, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f70, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f78, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f04, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f14, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f24, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f34, 0x00000000); + nv_mthd(priv, 
0xa097, 0x1f3c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f44, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f54, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f64, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f74, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f80, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f88, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f90, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f98, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f84, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f94, 0x00000000); + nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fac, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1fec, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000); + nv_mthd(priv, 0xa097, 0x2000, 0x00000000); + nv_mthd(priv, 0xa097, 0x2040, 0x00000011); + nv_mthd(priv, 0xa097, 0x2080, 0x00000020); + nv_mthd(priv, 0xa097, 0x20c0, 0x00000030); + nv_mthd(priv, 0xa097, 0x2100, 0x00000040); + nv_mthd(priv, 0xa097, 0x2140, 0x00000051); + nv_mthd(priv, 0xa097, 0x200c, 0x00000001); + nv_mthd(priv, 0xa097, 0x204c, 0x00000001); + nv_mthd(priv, 0xa097, 0x208c, 0x00000001); + nv_mthd(priv, 0xa097, 0x20cc, 0x00000001); + nv_mthd(priv, 0xa097, 0x210c, 0x00000001); + nv_mthd(priv, 0xa097, 0x214c, 0x00000001); + nv_mthd(priv, 0xa097, 0x2010, 0x00000000); + nv_mthd(priv, 0xa097, 0x2050, 0x00000000); + nv_mthd(priv, 0xa097, 0x2090, 0x00000001); + nv_mthd(priv, 0xa097, 0x20d0, 0x00000002); + nv_mthd(priv, 0xa097, 0x2110, 0x00000003); + nv_mthd(priv, 0xa097, 0x2150, 0x00000004); + nv_mthd(priv, 0xa097, 0x0380, 0x00000000); + nv_mthd(priv, 0xa097, 0x03a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x03c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x03e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0384, 0x00000000); + nv_mthd(priv, 0xa097, 0x03a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x03c4, 0x00000000); + nv_mthd(priv, 0xa097, 0x03e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0388, 0x00000000); + nv_mthd(priv, 0xa097, 0x03a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x03c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x03e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x038c, 0x00000000); + nv_mthd(priv, 0xa097, 0x03ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x03cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x03ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x0700, 0x00000000); + nv_mthd(priv, 0xa097, 0x0710, 0x00000000); + nv_mthd(priv, 0xa097, 0x0720, 0x00000000); + nv_mthd(priv, 0xa097, 0x0730, 0x00000000); + nv_mthd(priv, 
0xa097, 0x0704, 0x00000000); + nv_mthd(priv, 0xa097, 0x0714, 0x00000000); + nv_mthd(priv, 0xa097, 0x0724, 0x00000000); + nv_mthd(priv, 0xa097, 0x0734, 0x00000000); + nv_mthd(priv, 0xa097, 0x0708, 0x00000000); + nv_mthd(priv, 0xa097, 0x0718, 0x00000000); + nv_mthd(priv, 0xa097, 0x0728, 0x00000000); + nv_mthd(priv, 0xa097, 0x0738, 0x00000000); + nv_mthd(priv, 0xa097, 0x2800, 0x00000000); + nv_mthd(priv, 0xa097, 0x2804, 0x00000000); + nv_mthd(priv, 0xa097, 0x2808, 0x00000000); + nv_mthd(priv, 0xa097, 0x280c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2810, 0x00000000); + nv_mthd(priv, 0xa097, 0x2814, 0x00000000); + nv_mthd(priv, 0xa097, 0x2818, 0x00000000); + nv_mthd(priv, 0xa097, 0x281c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2820, 0x00000000); + nv_mthd(priv, 0xa097, 0x2824, 0x00000000); + nv_mthd(priv, 0xa097, 0x2828, 0x00000000); + nv_mthd(priv, 0xa097, 0x282c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2830, 0x00000000); + nv_mthd(priv, 0xa097, 0x2834, 0x00000000); + nv_mthd(priv, 0xa097, 0x2838, 0x00000000); + nv_mthd(priv, 0xa097, 0x283c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2840, 0x00000000); + nv_mthd(priv, 0xa097, 0x2844, 0x00000000); + nv_mthd(priv, 0xa097, 0x2848, 0x00000000); + nv_mthd(priv, 0xa097, 0x284c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2850, 0x00000000); + nv_mthd(priv, 0xa097, 0x2854, 0x00000000); + nv_mthd(priv, 0xa097, 0x2858, 0x00000000); + nv_mthd(priv, 0xa097, 0x285c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2860, 0x00000000); + nv_mthd(priv, 0xa097, 0x2864, 0x00000000); + nv_mthd(priv, 0xa097, 0x2868, 0x00000000); + nv_mthd(priv, 0xa097, 0x286c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2870, 0x00000000); + nv_mthd(priv, 0xa097, 0x2874, 0x00000000); + nv_mthd(priv, 0xa097, 0x2878, 0x00000000); + nv_mthd(priv, 0xa097, 0x287c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2880, 0x00000000); + nv_mthd(priv, 0xa097, 0x2884, 0x00000000); + nv_mthd(priv, 0xa097, 0x2888, 0x00000000); + nv_mthd(priv, 0xa097, 0x288c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2890, 0x00000000); + nv_mthd(priv, 0xa097, 0x2894, 0x00000000); + nv_mthd(priv, 0xa097, 0x2898, 0x00000000); + nv_mthd(priv, 0xa097, 0x289c, 0x00000000); + nv_mthd(priv, 0xa097, 0x28a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x28a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x28a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x28ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x28b0, 0x00000000); + nv_mthd(priv, 0xa097, 0x28b4, 0x00000000); + nv_mthd(priv, 0xa097, 0x28b8, 0x00000000); + nv_mthd(priv, 0xa097, 0x28bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x28c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x28c4, 0x00000000); + nv_mthd(priv, 0xa097, 0x28c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x28cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x28d0, 0x00000000); + nv_mthd(priv, 0xa097, 0x28d4, 0x00000000); + nv_mthd(priv, 0xa097, 0x28d8, 0x00000000); + nv_mthd(priv, 0xa097, 0x28dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x28e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x28e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x28e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x28ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x28f0, 0x00000000); + nv_mthd(priv, 0xa097, 0x28f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x28f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x28fc, 0x00000000); + nv_mthd(priv, 0xa097, 0x2900, 0x00000000); + nv_mthd(priv, 0xa097, 0x2904, 0x00000000); + nv_mthd(priv, 0xa097, 0x2908, 0x00000000); + nv_mthd(priv, 0xa097, 0x290c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2910, 0x00000000); + nv_mthd(priv, 0xa097, 0x2914, 0x00000000); + nv_mthd(priv, 0xa097, 0x2918, 0x00000000); + nv_mthd(priv, 
0xa097, 0x291c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2920, 0x00000000); + nv_mthd(priv, 0xa097, 0x2924, 0x00000000); + nv_mthd(priv, 0xa097, 0x2928, 0x00000000); + nv_mthd(priv, 0xa097, 0x292c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2930, 0x00000000); + nv_mthd(priv, 0xa097, 0x2934, 0x00000000); + nv_mthd(priv, 0xa097, 0x2938, 0x00000000); + nv_mthd(priv, 0xa097, 0x293c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2940, 0x00000000); + nv_mthd(priv, 0xa097, 0x2944, 0x00000000); + nv_mthd(priv, 0xa097, 0x2948, 0x00000000); + nv_mthd(priv, 0xa097, 0x294c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2950, 0x00000000); + nv_mthd(priv, 0xa097, 0x2954, 0x00000000); + nv_mthd(priv, 0xa097, 0x2958, 0x00000000); + nv_mthd(priv, 0xa097, 0x295c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2960, 0x00000000); + nv_mthd(priv, 0xa097, 0x2964, 0x00000000); + nv_mthd(priv, 0xa097, 0x2968, 0x00000000); + nv_mthd(priv, 0xa097, 0x296c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2970, 0x00000000); + nv_mthd(priv, 0xa097, 0x2974, 0x00000000); + nv_mthd(priv, 0xa097, 0x2978, 0x00000000); + nv_mthd(priv, 0xa097, 0x297c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2980, 0x00000000); + nv_mthd(priv, 0xa097, 0x2984, 0x00000000); + nv_mthd(priv, 0xa097, 0x2988, 0x00000000); + nv_mthd(priv, 0xa097, 0x298c, 0x00000000); + nv_mthd(priv, 0xa097, 0x2990, 0x00000000); + nv_mthd(priv, 0xa097, 0x2994, 0x00000000); + nv_mthd(priv, 0xa097, 0x2998, 0x00000000); + nv_mthd(priv, 0xa097, 0x299c, 0x00000000); + nv_mthd(priv, 0xa097, 0x29a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x29a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x29a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x29ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x29b0, 0x00000000); + nv_mthd(priv, 0xa097, 0x29b4, 0x00000000); + nv_mthd(priv, 0xa097, 0x29b8, 0x00000000); + nv_mthd(priv, 0xa097, 0x29bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x29c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x29c4, 0x00000000); + nv_mthd(priv, 0xa097, 0x29c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x29cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x29d0, 0x00000000); + nv_mthd(priv, 0xa097, 0x29d4, 0x00000000); + nv_mthd(priv, 0xa097, 0x29d8, 0x00000000); + nv_mthd(priv, 0xa097, 0x29dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x29e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x29e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x29e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x29ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x29f0, 0x00000000); + nv_mthd(priv, 0xa097, 0x29f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x29f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x29fc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a00, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a20, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a40, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a60, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a80, 0x00000000); + nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b00, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b20, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b40, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b60, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b80, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0be0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a04, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a24, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a44, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a64, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a84, 0x00000000); + nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000); + nv_mthd(priv, 
0xa097, 0x0ac4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b04, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b24, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b44, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b64, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b84, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0be4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a08, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a28, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a48, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a68, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a88, 0x00000000); + nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b08, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b28, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b48, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b68, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b88, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0be8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0aac, 0x00000000); + nv_mthd(priv, 0xa097, 0x0acc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0aec, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bac, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bec, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a10, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a30, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a50, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a70, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a90, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0af0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b10, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b30, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b50, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b70, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b90, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a14, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a34, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a54, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a74, 0x00000000); + nv_mthd(priv, 0xa097, 0x0a94, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0af4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b14, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b34, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b54, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b74, 0x00000000); + nv_mthd(priv, 0xa097, 0x0b94, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c00, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c10, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c20, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c30, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c40, 0x00000000); + nv_mthd(priv, 
0xa097, 0x0c50, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c60, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c70, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c80, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c90, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c04, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c14, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c24, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c34, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c44, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c54, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c64, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c74, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c84, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c94, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c08, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c18, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c28, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c38, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c48, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c58, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c68, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c78, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c88, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c98, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e00, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e10, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e20, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e30, 0x00000000); + nv_mthd(priv, 
0xa097, 0x0e40, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e50, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e60, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e70, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e80, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e90, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d40, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d48, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d50, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d58, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d44, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d54, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1e00, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e20, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e40, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e60, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e80, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e04, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e24, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e44, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e64, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e84, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e08, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e28, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e48, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e68, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e88, 0x00000002); + nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002); + nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002); + nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001); + nv_mthd(priv, 
0xa097, 0x1e6c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1eac, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001); + nv_mthd(priv, 0xa097, 0x1eec, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e10, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e30, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e50, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e70, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e90, 0x00000001); + nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e14, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e34, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e54, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e74, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e94, 0x00000002); + nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002); + nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002); + nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002); + nv_mthd(priv, 0xa097, 0x1e18, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e38, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e58, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e78, 0x00000001); + nv_mthd(priv, 0xa097, 0x1e98, 0x00000001); + nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001); + nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001); + nv_mthd(priv, 0xa097, 0x3400, 0x00000000); + nv_mthd(priv, 0xa097, 0x3404, 0x00000000); + nv_mthd(priv, 0xa097, 0x3408, 0x00000000); + nv_mthd(priv, 0xa097, 0x340c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3410, 0x00000000); + nv_mthd(priv, 0xa097, 0x3414, 0x00000000); + nv_mthd(priv, 0xa097, 0x3418, 0x00000000); + nv_mthd(priv, 0xa097, 0x341c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3420, 0x00000000); + nv_mthd(priv, 0xa097, 0x3424, 0x00000000); + nv_mthd(priv, 0xa097, 0x3428, 0x00000000); + nv_mthd(priv, 0xa097, 0x342c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3430, 0x00000000); + nv_mthd(priv, 0xa097, 0x3434, 0x00000000); + nv_mthd(priv, 0xa097, 0x3438, 0x00000000); + nv_mthd(priv, 0xa097, 0x343c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3440, 0x00000000); + nv_mthd(priv, 0xa097, 0x3444, 0x00000000); + nv_mthd(priv, 0xa097, 0x3448, 0x00000000); + nv_mthd(priv, 0xa097, 0x344c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3450, 0x00000000); + nv_mthd(priv, 0xa097, 0x3454, 0x00000000); + nv_mthd(priv, 0xa097, 0x3458, 0x00000000); + nv_mthd(priv, 0xa097, 0x345c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3460, 0x00000000); + nv_mthd(priv, 0xa097, 0x3464, 0x00000000); + nv_mthd(priv, 0xa097, 0x3468, 0x00000000); + nv_mthd(priv, 0xa097, 0x346c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3470, 0x00000000); + nv_mthd(priv, 0xa097, 0x3474, 0x00000000); + nv_mthd(priv, 0xa097, 0x3478, 0x00000000); + nv_mthd(priv, 0xa097, 0x347c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3480, 0x00000000); + nv_mthd(priv, 0xa097, 0x3484, 0x00000000); + nv_mthd(priv, 0xa097, 0x3488, 0x00000000); + nv_mthd(priv, 0xa097, 0x348c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3490, 0x00000000); + nv_mthd(priv, 0xa097, 0x3494, 0x00000000); + nv_mthd(priv, 0xa097, 0x3498, 0x00000000); + nv_mthd(priv, 0xa097, 0x349c, 0x00000000); + nv_mthd(priv, 0xa097, 0x34a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x34a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x34a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x34ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x34b0, 0x00000000); + nv_mthd(priv, 0xa097, 0x34b4, 0x00000000); + nv_mthd(priv, 0xa097, 0x34b8, 0x00000000); + nv_mthd(priv, 0xa097, 0x34bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x34c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x34c4, 0x00000000); + nv_mthd(priv, 
0xa097, 0x34c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x34cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x34d0, 0x00000000); + nv_mthd(priv, 0xa097, 0x34d4, 0x00000000); + nv_mthd(priv, 0xa097, 0x34d8, 0x00000000); + nv_mthd(priv, 0xa097, 0x34dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x34e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x34e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x34e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x34ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x34f0, 0x00000000); + nv_mthd(priv, 0xa097, 0x34f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x34f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x34fc, 0x00000000); + nv_mthd(priv, 0xa097, 0x3500, 0x00000000); + nv_mthd(priv, 0xa097, 0x3504, 0x00000000); + nv_mthd(priv, 0xa097, 0x3508, 0x00000000); + nv_mthd(priv, 0xa097, 0x350c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3510, 0x00000000); + nv_mthd(priv, 0xa097, 0x3514, 0x00000000); + nv_mthd(priv, 0xa097, 0x3518, 0x00000000); + nv_mthd(priv, 0xa097, 0x351c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3520, 0x00000000); + nv_mthd(priv, 0xa097, 0x3524, 0x00000000); + nv_mthd(priv, 0xa097, 0x3528, 0x00000000); + nv_mthd(priv, 0xa097, 0x352c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3530, 0x00000000); + nv_mthd(priv, 0xa097, 0x3534, 0x00000000); + nv_mthd(priv, 0xa097, 0x3538, 0x00000000); + nv_mthd(priv, 0xa097, 0x353c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3540, 0x00000000); + nv_mthd(priv, 0xa097, 0x3544, 0x00000000); + nv_mthd(priv, 0xa097, 0x3548, 0x00000000); + nv_mthd(priv, 0xa097, 0x354c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3550, 0x00000000); + nv_mthd(priv, 0xa097, 0x3554, 0x00000000); + nv_mthd(priv, 0xa097, 0x3558, 0x00000000); + nv_mthd(priv, 0xa097, 0x355c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3560, 0x00000000); + nv_mthd(priv, 0xa097, 0x3564, 0x00000000); + nv_mthd(priv, 0xa097, 0x3568, 0x00000000); + nv_mthd(priv, 0xa097, 0x356c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3570, 0x00000000); + nv_mthd(priv, 0xa097, 0x3574, 0x00000000); + nv_mthd(priv, 0xa097, 0x3578, 0x00000000); + nv_mthd(priv, 0xa097, 0x357c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3580, 0x00000000); + nv_mthd(priv, 0xa097, 0x3584, 0x00000000); + nv_mthd(priv, 0xa097, 0x3588, 0x00000000); + nv_mthd(priv, 0xa097, 0x358c, 0x00000000); + nv_mthd(priv, 0xa097, 0x3590, 0x00000000); + nv_mthd(priv, 0xa097, 0x3594, 0x00000000); + nv_mthd(priv, 0xa097, 0x3598, 0x00000000); + nv_mthd(priv, 0xa097, 0x359c, 0x00000000); + nv_mthd(priv, 0xa097, 0x35a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x35a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x35a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x35ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x35b0, 0x00000000); + nv_mthd(priv, 0xa097, 0x35b4, 0x00000000); + nv_mthd(priv, 0xa097, 0x35b8, 0x00000000); + nv_mthd(priv, 0xa097, 0x35bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x35c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x35c4, 0x00000000); + nv_mthd(priv, 0xa097, 0x35c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x35cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x35d0, 0x00000000); + nv_mthd(priv, 0xa097, 0x35d4, 0x00000000); + nv_mthd(priv, 0xa097, 0x35d8, 0x00000000); + nv_mthd(priv, 0xa097, 0x35dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x35e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x35e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x35e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x35ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x35f0, 0x00000000); + nv_mthd(priv, 0xa097, 0x35f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x35f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x35fc, 0x00000000); + nv_mthd(priv, 0xa097, 0x030c, 0x00000001); + nv_mthd(priv, 
0xa097, 0x1944, 0x00000000); + nv_mthd(priv, 0xa097, 0x1514, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff); + nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881); + nv_mthd(priv, 0xa097, 0x0fac, 0x00000001); + nv_mthd(priv, 0xa097, 0x1538, 0x00000001); + nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014); + nv_mthd(priv, 0xa097, 0x0fec, 0x00000040); + nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000); + nv_mthd(priv, 0xa097, 0x179c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1228, 0x00000400); + nv_mthd(priv, 0xa097, 0x122c, 0x00000300); + nv_mthd(priv, 0xa097, 0x1230, 0x00010001); + nv_mthd(priv, 0xa097, 0x07f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x15b4, 0x00000001); + nv_mthd(priv, 0xa097, 0x15cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1534, 0x00000000); + nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000); + nv_mthd(priv, 0xa097, 0x15d0, 0x00000000); + nv_mthd(priv, 0xa097, 0x153c, 0x00000000); + nv_mthd(priv, 0xa097, 0x16b4, 0x00000003); + nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff); + nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff); + nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff); + nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff); + nv_mthd(priv, 0xa097, 0x0df8, 0x00000000); + nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1948, 0x00000000); + nv_mthd(priv, 0xa097, 0x1970, 0x00000001); + nv_mthd(priv, 0xa097, 0x161c, 0x000009f0); + nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010); + nv_mthd(priv, 0xa097, 0x163c, 0x00000000); + nv_mthd(priv, 0xa097, 0x15e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1160, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1164, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1168, 0x25e00040); + nv_mthd(priv, 0xa097, 0x116c, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1170, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1174, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1178, 0x25e00040); + nv_mthd(priv, 0xa097, 0x117c, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1180, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1184, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1188, 0x25e00040); + nv_mthd(priv, 0xa097, 0x118c, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1190, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1194, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1198, 0x25e00040); + nv_mthd(priv, 0xa097, 0x119c, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040); + nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040); + nv_mthd(priv, 0xa097, 0x1880, 0x00000000); + nv_mthd(priv, 0xa097, 0x1884, 0x00000000); + nv_mthd(priv, 0xa097, 0x1888, 0x00000000); + nv_mthd(priv, 0xa097, 0x188c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1890, 0x00000000); + nv_mthd(priv, 0xa097, 0x1894, 0x00000000); + nv_mthd(priv, 0xa097, 0x1898, 0x00000000); + nv_mthd(priv, 0xa097, 0x189c, 0x00000000); + nv_mthd(priv, 0xa097, 0x18a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x18a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x18a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x18ac, 0x00000000); + nv_mthd(priv, 
0xa097, 0x18b0, 0x00000000); + nv_mthd(priv, 0xa097, 0x18b4, 0x00000000); + nv_mthd(priv, 0xa097, 0x18b8, 0x00000000); + nv_mthd(priv, 0xa097, 0x18bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x18c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x18c4, 0x00000000); + nv_mthd(priv, 0xa097, 0x18c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x18cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x18d0, 0x00000000); + nv_mthd(priv, 0xa097, 0x18d4, 0x00000000); + nv_mthd(priv, 0xa097, 0x18d8, 0x00000000); + nv_mthd(priv, 0xa097, 0x18dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x18e0, 0x00000000); + nv_mthd(priv, 0xa097, 0x18e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x18e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x18ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x18f0, 0x00000000); + nv_mthd(priv, 0xa097, 0x18f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x18f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x18fc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f84, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f88, 0x00000000); + nv_mthd(priv, 0xa097, 0x17c8, 0x00000000); + nv_mthd(priv, 0xa097, 0x17cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff); + nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff); + nv_mthd(priv, 0xa097, 0x17d8, 0x00000002); + nv_mthd(priv, 0xa097, 0x17dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x15f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x15f8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1434, 0x00000000); + nv_mthd(priv, 0xa097, 0x1438, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d74, 0x00000000); + nv_mthd(priv, 0xa097, 0x0dec, 0x00000001); + nv_mthd(priv, 0xa097, 0x13a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1318, 0x00000001); + nv_mthd(priv, 0xa097, 0x1644, 0x00000000); + nv_mthd(priv, 0xa097, 0x0748, 0x00000000); + nv_mthd(priv, 0xa097, 0x0de8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1648, 0x00000000); + nv_mthd(priv, 0xa097, 0x12a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x1120, 0x00000000); + nv_mthd(priv, 0xa097, 0x1124, 0x00000000); + nv_mthd(priv, 0xa097, 0x1128, 0x00000000); + nv_mthd(priv, 0xa097, 0x112c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1118, 0x00000000); + nv_mthd(priv, 0xa097, 0x164c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1658, 0x00000000); + nv_mthd(priv, 0xa097, 0x1910, 0x00000290); + nv_mthd(priv, 0xa097, 0x1518, 0x00000000); + nv_mthd(priv, 0xa097, 0x165c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1520, 0x00000000); + nv_mthd(priv, 0xa097, 0x1604, 0x00000000); + nv_mthd(priv, 0xa097, 0x1570, 0x00000000); + nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000); + nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000); + nv_mthd(priv, 0xa097, 0x020c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1670, 0x30201000); + nv_mthd(priv, 0xa097, 0x1674, 0x70605040); + nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888); + nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8); + nv_mthd(priv, 0xa097, 0x166c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00); + nv_mthd(priv, 0xa097, 0x12d0, 0x00000003); + nv_mthd(priv, 0xa097, 0x12d4, 0x00000002); + nv_mthd(priv, 0xa097, 0x1684, 0x00000000); + nv_mthd(priv, 0xa097, 0x1688, 0x00000000); + nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02); + nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02); + nv_mthd(priv, 0xa097, 0x0db4, 0x00000000); + nv_mthd(priv, 0xa097, 0x168c, 0x00000000); + nv_mthd(priv, 0xa097, 0x15bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x156c, 0x00000000); + nv_mthd(priv, 0xa097, 0x187c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1110, 0x00000001); + nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000); + nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1234, 0x00000000); + nv_mthd(priv, 
0xa097, 0x1690, 0x00000000); + nv_mthd(priv, 0xa097, 0x12ac, 0x00000001); + nv_mthd(priv, 0xa097, 0x0790, 0x00000000); + nv_mthd(priv, 0xa097, 0x0794, 0x00000000); + nv_mthd(priv, 0xa097, 0x0798, 0x00000000); + nv_mthd(priv, 0xa097, 0x079c, 0x00000000); + nv_mthd(priv, 0xa097, 0x07a0, 0x00000000); + nv_mthd(priv, 0xa097, 0x077c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1000, 0x00000010); + nv_mthd(priv, 0xa097, 0x10fc, 0x00000000); + nv_mthd(priv, 0xa097, 0x1290, 0x00000000); + nv_mthd(priv, 0xa097, 0x0218, 0x00000010); + nv_mthd(priv, 0xa097, 0x12d8, 0x00000000); + nv_mthd(priv, 0xa097, 0x12dc, 0x00000010); + nv_mthd(priv, 0xa097, 0x0d94, 0x00000001); + nv_mthd(priv, 0xa097, 0x155c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1560, 0x00000000); + nv_mthd(priv, 0xa097, 0x1564, 0x00000fff); + nv_mthd(priv, 0xa097, 0x1574, 0x00000000); + nv_mthd(priv, 0xa097, 0x1578, 0x00000000); + nv_mthd(priv, 0xa097, 0x157c, 0x000fffff); + nv_mthd(priv, 0xa097, 0x1354, 0x00000000); + nv_mthd(priv, 0xa097, 0x1610, 0x00000012); + nv_mthd(priv, 0xa097, 0x1608, 0x00000000); + nv_mthd(priv, 0xa097, 0x160c, 0x00000000); + nv_mthd(priv, 0xa097, 0x260c, 0x00000000); + nv_mthd(priv, 0xa097, 0x07ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x162c, 0x00000003); + nv_mthd(priv, 0xa097, 0x0210, 0x00000000); + nv_mthd(priv, 0xa097, 0x0320, 0x00000000); + nv_mthd(priv, 0xa097, 0x0324, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0328, 0x3f800000); + nv_mthd(priv, 0xa097, 0x032c, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0330, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0334, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0338, 0x3f800000); + nv_mthd(priv, 0xa097, 0x0750, 0x00000000); + nv_mthd(priv, 0xa097, 0x0760, 0x39291909); + nv_mthd(priv, 0xa097, 0x0764, 0x79695949); + nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989); + nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9); + nv_mthd(priv, 0xa097, 0x0770, 0x30201000); + nv_mthd(priv, 0xa097, 0x0774, 0x70605040); + nv_mthd(priv, 0xa097, 0x0778, 0x00009080); + nv_mthd(priv, 0xa097, 0x0780, 0x39291909); + nv_mthd(priv, 0xa097, 0x0784, 0x79695949); + nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989); + nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9); + nv_mthd(priv, 0xa097, 0x07d0, 0x30201000); + nv_mthd(priv, 0xa097, 0x07d4, 0x70605040); + nv_mthd(priv, 0xa097, 0x07d8, 0x00009080); + nv_mthd(priv, 0xa097, 0x037c, 0x00000001); + nv_mthd(priv, 0xa097, 0x0740, 0x00000000); + nv_mthd(priv, 0xa097, 0x0744, 0x00000000); + nv_mthd(priv, 0xa097, 0x2600, 0x00000000); + nv_mthd(priv, 0xa097, 0x1918, 0x00000000); + nv_mthd(priv, 0xa097, 0x191c, 0x00000900); + nv_mthd(priv, 0xa097, 0x1920, 0x00000405); + nv_mthd(priv, 0xa097, 0x1308, 0x00000001); + nv_mthd(priv, 0xa097, 0x1924, 0x00000000); + nv_mthd(priv, 0xa097, 0x13ac, 0x00000000); + nv_mthd(priv, 0xa097, 0x192c, 0x00000001); + nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c); + nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x02c0, 0x00000001); + nv_mthd(priv, 0xa097, 0x1510, 0x00000000); + nv_mthd(priv, 0xa097, 0x1940, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000); + nv_mthd(priv, 0xa097, 0x194c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1950, 0x00000000); + nv_mthd(priv, 0xa097, 0x1968, 0x00000000); + nv_mthd(priv, 0xa097, 0x1590, 0x0000003f); + nv_mthd(priv, 0xa097, 0x07e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x07ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x07f0, 0x00000000); + nv_mthd(priv, 0xa097, 0x07f4, 0x00000000); + nv_mthd(priv, 0xa097, 0x196c, 0x00000011); + nv_mthd(priv, 
0xa097, 0x02e4, 0x0000b001); + nv_mthd(priv, 0xa097, 0x036c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0370, 0x00000000); + nv_mthd(priv, 0xa097, 0x197c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000); + nv_mthd(priv, 0xa097, 0x02d8, 0x00000040); + nv_mthd(priv, 0xa097, 0x1980, 0x00000080); + nv_mthd(priv, 0xa097, 0x1504, 0x00000080); + nv_mthd(priv, 0xa097, 0x1984, 0x00000000); + nv_mthd(priv, 0xa097, 0x0300, 0x00000001); + nv_mthd(priv, 0xa097, 0x13a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x12ec, 0x00000000); + nv_mthd(priv, 0xa097, 0x1310, 0x00000000); + nv_mthd(priv, 0xa097, 0x1314, 0x00000001); + nv_mthd(priv, 0xa097, 0x1380, 0x00000000); + nv_mthd(priv, 0xa097, 0x1384, 0x00000001); + nv_mthd(priv, 0xa097, 0x1388, 0x00000001); + nv_mthd(priv, 0xa097, 0x138c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1390, 0x00000001); + nv_mthd(priv, 0xa097, 0x1394, 0x00000000); + nv_mthd(priv, 0xa097, 0x139c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1398, 0x00000000); + nv_mthd(priv, 0xa097, 0x1594, 0x00000000); + nv_mthd(priv, 0xa097, 0x1598, 0x00000001); + nv_mthd(priv, 0xa097, 0x159c, 0x00000001); + nv_mthd(priv, 0xa097, 0x15a0, 0x00000001); + nv_mthd(priv, 0xa097, 0x15a4, 0x00000001); + nv_mthd(priv, 0xa097, 0x0f54, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f58, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000); + nv_mthd(priv, 0xa097, 0x19bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000); + nv_mthd(priv, 0xa097, 0x12cc, 0x00000000); + nv_mthd(priv, 0xa097, 0x12e8, 0x00000000); + nv_mthd(priv, 0xa097, 0x130c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1360, 0x00000000); + nv_mthd(priv, 0xa097, 0x1364, 0x00000000); + nv_mthd(priv, 0xa097, 0x1368, 0x00000000); + nv_mthd(priv, 0xa097, 0x136c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1370, 0x00000000); + nv_mthd(priv, 0xa097, 0x1374, 0x00000000); + nv_mthd(priv, 0xa097, 0x1378, 0x00000000); + nv_mthd(priv, 0xa097, 0x137c, 0x00000000); + nv_mthd(priv, 0xa097, 0x133c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1340, 0x00000001); + nv_mthd(priv, 0xa097, 0x1344, 0x00000002); + nv_mthd(priv, 0xa097, 0x1348, 0x00000001); + nv_mthd(priv, 0xa097, 0x134c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1350, 0x00000002); + nv_mthd(priv, 0xa097, 0x1358, 0x00000001); + nv_mthd(priv, 0xa097, 0x12e4, 0x00000000); + nv_mthd(priv, 0xa097, 0x131c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1320, 0x00000000); + nv_mthd(priv, 0xa097, 0x1324, 0x00000000); + nv_mthd(priv, 0xa097, 0x1328, 0x00000000); + nv_mthd(priv, 0xa097, 0x19c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x1140, 0x00000000); + nv_mthd(priv, 0xa097, 0x19c4, 0x00000000); + nv_mthd(priv, 0xa097, 0x19c8, 0x00001500); + nv_mthd(priv, 0xa097, 0x135c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f90, 0x00000000); + nv_mthd(priv, 0xa097, 0x19e0, 0x00000001); + nv_mthd(priv, 0xa097, 0x19e4, 0x00000001); + nv_mthd(priv, 0xa097, 0x19e8, 0x00000001); + nv_mthd(priv, 0xa097, 0x19ec, 0x00000001); + nv_mthd(priv, 0xa097, 0x19f0, 0x00000001); + nv_mthd(priv, 0xa097, 0x19f4, 0x00000001); + nv_mthd(priv, 0xa097, 0x19f8, 0x00000001); + nv_mthd(priv, 0xa097, 0x19fc, 0x00000001); + nv_mthd(priv, 0xa097, 0x19cc, 0x00000001); + nv_mthd(priv, 0xa097, 0x15b8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1a00, 0x00001111); + nv_mthd(priv, 0xa097, 0x1a04, 0x00000000); + nv_mthd(priv, 0xa097, 0x1a08, 0x00000000); + nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000); + nv_mthd(priv, 0xa097, 0x1a10, 0x00000000); + nv_mthd(priv, 0xa097, 0x1a14, 0x00000000); + nv_mthd(priv, 
0xa097, 0x1a18, 0x00000000); + nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000); + nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000); + nv_mthd(priv, 0xa097, 0x10f8, 0x00001010); + nv_mthd(priv, 0xa097, 0x0d80, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d84, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d88, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0d90, 0x00000000); + nv_mthd(priv, 0xa097, 0x0da0, 0x00000000); + nv_mthd(priv, 0xa097, 0x07a4, 0x00000000); + nv_mthd(priv, 0xa097, 0x07a8, 0x00000000); + nv_mthd(priv, 0xa097, 0x1508, 0x80000000); + nv_mthd(priv, 0xa097, 0x150c, 0x40000000); + nv_mthd(priv, 0xa097, 0x1668, 0x00000000); + nv_mthd(priv, 0xa097, 0x0318, 0x00000008); + nv_mthd(priv, 0xa097, 0x031c, 0x00000008); + nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001); + nv_mthd(priv, 0xa097, 0x0374, 0x00000000); + nv_mthd(priv, 0xa097, 0x0378, 0x00000020); + nv_mthd(priv, 0xa097, 0x07dc, 0x00000000); + nv_mthd(priv, 0xa097, 0x074c, 0x00000055); + nv_mthd(priv, 0xa097, 0x1420, 0x00000003); + nv_mthd(priv, 0xa097, 0x17bc, 0x00000000); + nv_mthd(priv, 0xa097, 0x17c0, 0x00000000); + nv_mthd(priv, 0xa097, 0x17c4, 0x00000001); + nv_mthd(priv, 0xa097, 0x1008, 0x00000008); + nv_mthd(priv, 0xa097, 0x100c, 0x00000040); + nv_mthd(priv, 0xa097, 0x1010, 0x0000012c); + nv_mthd(priv, 0xa097, 0x0d60, 0x00000040); + nv_mthd(priv, 0xa097, 0x075c, 0x00000003); + nv_mthd(priv, 0xa097, 0x1018, 0x00000020); + nv_mthd(priv, 0xa097, 0x101c, 0x00000001); + nv_mthd(priv, 0xa097, 0x1020, 0x00000020); + nv_mthd(priv, 0xa097, 0x1024, 0x00000001); + nv_mthd(priv, 0xa097, 0x1444, 0x00000000); + nv_mthd(priv, 0xa097, 0x1448, 0x00000000); + nv_mthd(priv, 0xa097, 0x144c, 0x00000000); + nv_mthd(priv, 0xa097, 0x0360, 0x20164010); + nv_mthd(priv, 0xa097, 0x0364, 0x00000020); + nv_mthd(priv, 0xa097, 0x0368, 0x00000000); + nv_mthd(priv, 0xa097, 0x0de4, 0x00000000); + nv_mthd(priv, 0xa097, 0x0204, 0x00000006); + nv_mthd(priv, 0xa097, 0x0208, 0x00000000); + nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff); + nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff); + nv_mthd(priv, 0xa097, 0x1220, 0x00000005); + nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000); + nv_mthd(priv, 0xa097, 0x0f98, 0x00400008); + nv_mthd(priv, 0xa097, 0x1284, 0x08000080); + nv_mthd(priv, 0xa097, 0x1450, 0x00400008); + nv_mthd(priv, 0xa097, 0x1454, 0x08000080); + nv_mthd(priv, 0xa097, 0x0214, 0x00000000); +} + +static void +nve0_grctx_generate_902d(struct nvc0_graph_priv *priv) +{ + nv_mthd(priv, 0x902d, 0x0200, 0x000000cf); + nv_mthd(priv, 0x902d, 0x0204, 0x00000001); + nv_mthd(priv, 0x902d, 0x0208, 0x00000020); + nv_mthd(priv, 0x902d, 0x020c, 0x00000001); + nv_mthd(priv, 0x902d, 0x0210, 0x00000000); + nv_mthd(priv, 0x902d, 0x0214, 0x00000080); + nv_mthd(priv, 0x902d, 0x0218, 0x00000100); + nv_mthd(priv, 0x902d, 0x021c, 0x00000100); + nv_mthd(priv, 0x902d, 0x0220, 0x00000000); + nv_mthd(priv, 0x902d, 0x0224, 0x00000000); + nv_mthd(priv, 0x902d, 0x0230, 0x000000cf); + nv_mthd(priv, 0x902d, 0x0234, 0x00000001); + nv_mthd(priv, 0x902d, 0x0238, 0x00000020); + nv_mthd(priv, 0x902d, 0x023c, 0x00000001); + nv_mthd(priv, 0x902d, 0x0244, 0x00000080); + nv_mthd(priv, 0x902d, 0x0248, 0x00000100); + nv_mthd(priv, 0x902d, 0x024c, 0x00000100); + nv_mthd(priv, 0x902d, 0x3410, 0x00000000); +} + +static void +nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404010, 0x0); + nv_wr32(priv, 0x404014, 0x0); + nv_wr32(priv, 0x404018, 0x0); + nv_wr32(priv, 0x40401c, 0x0); + nv_wr32(priv, 0x404020, 0x0); + 
nv_wr32(priv, 0x404024, 0xe000); + nv_wr32(priv, 0x404028, 0x0); + nv_wr32(priv, 0x4040a8, 0x0); + nv_wr32(priv, 0x4040ac, 0x0); + nv_wr32(priv, 0x4040b0, 0x0); + nv_wr32(priv, 0x4040b4, 0x0); + nv_wr32(priv, 0x4040b8, 0x0); + nv_wr32(priv, 0x4040bc, 0x0); + nv_wr32(priv, 0x4040c0, 0x0); + nv_wr32(priv, 0x4040c4, 0x0); + nv_wr32(priv, 0x4040c8, 0xf800008f); + nv_wr32(priv, 0x4040d0, 0x0); + nv_wr32(priv, 0x4040d4, 0x0); + nv_wr32(priv, 0x4040d8, 0x0); + nv_wr32(priv, 0x4040dc, 0x0); + nv_wr32(priv, 0x4040e0, 0x0); + nv_wr32(priv, 0x4040e4, 0x0); + nv_wr32(priv, 0x4040e8, 0x1000); + nv_wr32(priv, 0x4040f8, 0x0); + nv_wr32(priv, 0x404130, 0x0); + nv_wr32(priv, 0x404134, 0x0); + nv_wr32(priv, 0x404138, 0x20000040); + nv_wr32(priv, 0x404150, 0x2e); + nv_wr32(priv, 0x404154, 0x400); + nv_wr32(priv, 0x404158, 0x200); + nv_wr32(priv, 0x404164, 0x55); + nv_wr32(priv, 0x4041a0, 0x0); + nv_wr32(priv, 0x4041a4, 0x0); + nv_wr32(priv, 0x4041a8, 0x0); + nv_wr32(priv, 0x4041ac, 0x0); + nv_wr32(priv, 0x404200, 0x0); + nv_wr32(priv, 0x404204, 0x0); + nv_wr32(priv, 0x404208, 0x0); + nv_wr32(priv, 0x40420c, 0x0); +} + +static void +nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404404, 0x0); + nv_wr32(priv, 0x404408, 0x0); + nv_wr32(priv, 0x40440c, 0x0); + nv_wr32(priv, 0x404410, 0x0); + nv_wr32(priv, 0x404414, 0x0); + nv_wr32(priv, 0x404418, 0x0); + nv_wr32(priv, 0x40441c, 0x0); + nv_wr32(priv, 0x404420, 0x0); + nv_wr32(priv, 0x404424, 0x0); + nv_wr32(priv, 0x404428, 0x0); + nv_wr32(priv, 0x40442c, 0x0); + nv_wr32(priv, 0x404430, 0x0); + nv_wr32(priv, 0x404434, 0x0); + nv_wr32(priv, 0x404438, 0x0); + nv_wr32(priv, 0x404460, 0x0); + nv_wr32(priv, 0x404464, 0x0); + nv_wr32(priv, 0x404468, 0xffffff); + nv_wr32(priv, 0x40446c, 0x0); + nv_wr32(priv, 0x404480, 0x1); + nv_wr32(priv, 0x404498, 0x1); +} + +static void +nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404604, 0x14); + nv_wr32(priv, 0x404608, 0x0); + nv_wr32(priv, 0x40460c, 0x3fff); + nv_wr32(priv, 0x404610, 0x100); + nv_wr32(priv, 0x404618, 0x0); + nv_wr32(priv, 0x40461c, 0x0); + nv_wr32(priv, 0x404620, 0x0); + nv_wr32(priv, 0x404624, 0x0); + nv_wr32(priv, 0x40462c, 0x0); + nv_wr32(priv, 0x404630, 0x0); + nv_wr32(priv, 0x404640, 0x0); + nv_wr32(priv, 0x404654, 0x0); + nv_wr32(priv, 0x404660, 0x0); + nv_wr32(priv, 0x404678, 0x0); + nv_wr32(priv, 0x40467c, 0x2); + nv_wr32(priv, 0x404680, 0x0); + nv_wr32(priv, 0x404684, 0x0); + nv_wr32(priv, 0x404688, 0x0); + nv_wr32(priv, 0x40468c, 0x0); + nv_wr32(priv, 0x404690, 0x0); + nv_wr32(priv, 0x404694, 0x0); + nv_wr32(priv, 0x404698, 0x0); + nv_wr32(priv, 0x40469c, 0x0); + nv_wr32(priv, 0x4046a0, 0x7f0080); + nv_wr32(priv, 0x4046a4, 0x0); + nv_wr32(priv, 0x4046a8, 0x0); + nv_wr32(priv, 0x4046ac, 0x0); + nv_wr32(priv, 0x4046b0, 0x0); + nv_wr32(priv, 0x4046b4, 0x0); + nv_wr32(priv, 0x4046b8, 0x0); + nv_wr32(priv, 0x4046bc, 0x0); + nv_wr32(priv, 0x4046c0, 0x0); + nv_wr32(priv, 0x4046c8, 0x0); + nv_wr32(priv, 0x4046cc, 0x0); + nv_wr32(priv, 0x4046d0, 0x0); +} + +static void +nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x404700, 0x0); + nv_wr32(priv, 0x404704, 0x0); + nv_wr32(priv, 0x404708, 0x0); + nv_wr32(priv, 0x404718, 0x0); + nv_wr32(priv, 0x40471c, 0x0); + nv_wr32(priv, 0x404720, 0x0); + nv_wr32(priv, 0x404724, 0x0); + nv_wr32(priv, 0x404728, 0x0); + nv_wr32(priv, 0x40472c, 0x0); + nv_wr32(priv, 0x404730, 0x0); + nv_wr32(priv, 0x404734, 0x100); + nv_wr32(priv, 0x404738, 0x0); + nv_wr32(priv, 0x40473c, 0x0); + 
nv_wr32(priv, 0x404744, 0x0); + nv_wr32(priv, 0x404748, 0x0); + nv_wr32(priv, 0x404754, 0x0); +} + +static void +nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x405800, 0xf8000bf); + nv_wr32(priv, 0x405830, 0x2180648); + nv_wr32(priv, 0x405834, 0x8000000); + nv_wr32(priv, 0x405838, 0x0); + nv_wr32(priv, 0x405854, 0x0); + nv_wr32(priv, 0x405870, 0x1); + nv_wr32(priv, 0x405874, 0x1); + nv_wr32(priv, 0x405878, 0x1); + nv_wr32(priv, 0x40587c, 0x1); + nv_wr32(priv, 0x405a00, 0x0); + nv_wr32(priv, 0x405a04, 0x0); + nv_wr32(priv, 0x405a18, 0x0); + nv_wr32(priv, 0x405b00, 0x0); + nv_wr32(priv, 0x405b10, 0x1000); +} + +static void +nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x406020, 0x4103c1); + nv_wr32(priv, 0x406028, 0x1); + nv_wr32(priv, 0x40602c, 0x1); + nv_wr32(priv, 0x406030, 0x1); + nv_wr32(priv, 0x406034, 0x1); +} + +static void +nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x4064a8, 0x0); + nv_wr32(priv, 0x4064ac, 0x3fff); + nv_wr32(priv, 0x4064b4, 0x0); + nv_wr32(priv, 0x4064b8, 0x0); + nv_wr32(priv, 0x4064c0, 0x801a00f0); + nv_wr32(priv, 0x4064c4, 0x192ffff); + nv_wr32(priv, 0x4064c8, 0x1800600); + nv_wr32(priv, 0x4064cc, 0x0); + nv_wr32(priv, 0x4064d0, 0x0); + nv_wr32(priv, 0x4064d4, 0x0); + nv_wr32(priv, 0x4064d8, 0x0); + nv_wr32(priv, 0x4064dc, 0x0); + nv_wr32(priv, 0x4064e0, 0x0); + nv_wr32(priv, 0x4064e4, 0x0); + nv_wr32(priv, 0x4064e8, 0x0); + nv_wr32(priv, 0x4064ec, 0x0); + nv_wr32(priv, 0x4064fc, 0x22a); +} + +static void +nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x407040, 0x0); +} + +static void +nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x407804, 0x23); + nv_wr32(priv, 0x40780c, 0xa418820); + nv_wr32(priv, 0x407810, 0x62080e6); + nv_wr32(priv, 0x407814, 0x20398a4); + nv_wr32(priv, 0x407818, 0xe629062); + nv_wr32(priv, 0x40781c, 0xa418820); + nv_wr32(priv, 0x407820, 0xe6); + nv_wr32(priv, 0x4078bc, 0x103); +} + +static void +nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x408000, 0x0); + nv_wr32(priv, 0x408004, 0x0); + nv_wr32(priv, 0x408008, 0x30); + nv_wr32(priv, 0x40800c, 0x0); + nv_wr32(priv, 0x408010, 0x0); + nv_wr32(priv, 0x408014, 0x69); + nv_wr32(priv, 0x408018, 0xe100e100); + nv_wr32(priv, 0x408064, 0x0); +} + +static void +nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x408800, 0x2802a3c); + nv_wr32(priv, 0x408804, 0x40); + nv_wr32(priv, 0x408808, 0x1043e005); + nv_wr32(priv, 0x408840, 0xb); + nv_wr32(priv, 0x408900, 0x3080b801); + nv_wr32(priv, 0x408904, 0x62000001); + nv_wr32(priv, 0x408908, 0xc8102f); + nv_wr32(priv, 0x408980, 0x11d); +} + +static void +nve0_graph_generate_gpc(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x418380, 0x16); + nv_wr32(priv, 0x418400, 0x38004e00); + nv_wr32(priv, 0x418404, 0x71e0ffff); + nv_wr32(priv, 0x41840c, 0x1008); + nv_wr32(priv, 0x418410, 0xfff0fff); + nv_wr32(priv, 0x418414, 0x2200fff); + nv_wr32(priv, 0x418450, 0x0); + nv_wr32(priv, 0x418454, 0x0); + nv_wr32(priv, 0x418458, 0x0); + nv_wr32(priv, 0x41845c, 0x0); + nv_wr32(priv, 0x418460, 0x0); + nv_wr32(priv, 0x418464, 0x0); + nv_wr32(priv, 0x418468, 0x1); + nv_wr32(priv, 0x41846c, 0x0); + nv_wr32(priv, 0x418470, 0x0); + nv_wr32(priv, 0x418600, 0x1f); + nv_wr32(priv, 0x418684, 0xf); + nv_wr32(priv, 0x418700, 0x2); + nv_wr32(priv, 0x418704, 0x80); + nv_wr32(priv, 0x418708, 0x0); + nv_wr32(priv, 0x41870c, 0x0); + nv_wr32(priv, 0x418710, 0x0); + 
nv_wr32(priv, 0x418800, 0x7006860a); + nv_wr32(priv, 0x418808, 0x0); + nv_wr32(priv, 0x41880c, 0x0); + nv_wr32(priv, 0x418810, 0x0); + nv_wr32(priv, 0x418828, 0x44); + nv_wr32(priv, 0x418830, 0x10000001); + nv_wr32(priv, 0x4188d8, 0x8); + nv_wr32(priv, 0x4188e0, 0x1000000); + nv_wr32(priv, 0x4188e8, 0x0); + nv_wr32(priv, 0x4188ec, 0x0); + nv_wr32(priv, 0x4188f0, 0x0); + nv_wr32(priv, 0x4188f4, 0x0); + nv_wr32(priv, 0x4188f8, 0x0); + nv_wr32(priv, 0x4188fc, 0x20100018); + nv_wr32(priv, 0x41891c, 0xff00ff); + nv_wr32(priv, 0x418924, 0x0); + nv_wr32(priv, 0x418928, 0xffff00); + nv_wr32(priv, 0x41892c, 0xff00); + nv_wr32(priv, 0x418a00, 0x0); + nv_wr32(priv, 0x418a04, 0x0); + nv_wr32(priv, 0x418a08, 0x0); + nv_wr32(priv, 0x418a0c, 0x10000); + nv_wr32(priv, 0x418a10, 0x0); + nv_wr32(priv, 0x418a14, 0x0); + nv_wr32(priv, 0x418a18, 0x0); + nv_wr32(priv, 0x418a20, 0x0); + nv_wr32(priv, 0x418a24, 0x0); + nv_wr32(priv, 0x418a28, 0x0); + nv_wr32(priv, 0x418a2c, 0x10000); + nv_wr32(priv, 0x418a30, 0x0); + nv_wr32(priv, 0x418a34, 0x0); + nv_wr32(priv, 0x418a38, 0x0); + nv_wr32(priv, 0x418a40, 0x0); + nv_wr32(priv, 0x418a44, 0x0); + nv_wr32(priv, 0x418a48, 0x0); + nv_wr32(priv, 0x418a4c, 0x10000); + nv_wr32(priv, 0x418a50, 0x0); + nv_wr32(priv, 0x418a54, 0x0); + nv_wr32(priv, 0x418a58, 0x0); + nv_wr32(priv, 0x418a60, 0x0); + nv_wr32(priv, 0x418a64, 0x0); + nv_wr32(priv, 0x418a68, 0x0); + nv_wr32(priv, 0x418a6c, 0x10000); + nv_wr32(priv, 0x418a70, 0x0); + nv_wr32(priv, 0x418a74, 0x0); + nv_wr32(priv, 0x418a78, 0x0); + nv_wr32(priv, 0x418a80, 0x0); + nv_wr32(priv, 0x418a84, 0x0); + nv_wr32(priv, 0x418a88, 0x0); + nv_wr32(priv, 0x418a8c, 0x10000); + nv_wr32(priv, 0x418a90, 0x0); + nv_wr32(priv, 0x418a94, 0x0); + nv_wr32(priv, 0x418a98, 0x0); + nv_wr32(priv, 0x418aa0, 0x0); + nv_wr32(priv, 0x418aa4, 0x0); + nv_wr32(priv, 0x418aa8, 0x0); + nv_wr32(priv, 0x418aac, 0x10000); + nv_wr32(priv, 0x418ab0, 0x0); + nv_wr32(priv, 0x418ab4, 0x0); + nv_wr32(priv, 0x418ab8, 0x0); + nv_wr32(priv, 0x418ac0, 0x0); + nv_wr32(priv, 0x418ac4, 0x0); + nv_wr32(priv, 0x418ac8, 0x0); + nv_wr32(priv, 0x418acc, 0x10000); + nv_wr32(priv, 0x418ad0, 0x0); + nv_wr32(priv, 0x418ad4, 0x0); + nv_wr32(priv, 0x418ad8, 0x0); + nv_wr32(priv, 0x418ae0, 0x0); + nv_wr32(priv, 0x418ae4, 0x0); + nv_wr32(priv, 0x418ae8, 0x0); + nv_wr32(priv, 0x418aec, 0x10000); + nv_wr32(priv, 0x418af0, 0x0); + nv_wr32(priv, 0x418af4, 0x0); + nv_wr32(priv, 0x418af8, 0x0); + nv_wr32(priv, 0x418b00, 0x6); + nv_wr32(priv, 0x418b08, 0xa418820); + nv_wr32(priv, 0x418b0c, 0x62080e6); + nv_wr32(priv, 0x418b10, 0x20398a4); + nv_wr32(priv, 0x418b14, 0xe629062); + nv_wr32(priv, 0x418b18, 0xa418820); + nv_wr32(priv, 0x418b1c, 0xe6); + nv_wr32(priv, 0x418bb8, 0x103); + nv_wr32(priv, 0x418c08, 0x1); + nv_wr32(priv, 0x418c10, 0x0); + nv_wr32(priv, 0x418c14, 0x0); + nv_wr32(priv, 0x418c18, 0x0); + nv_wr32(priv, 0x418c1c, 0x0); + nv_wr32(priv, 0x418c20, 0x0); + nv_wr32(priv, 0x418c24, 0x0); + nv_wr32(priv, 0x418c28, 0x0); + nv_wr32(priv, 0x418c2c, 0x0); + nv_wr32(priv, 0x418c40, 0xffffffff); + nv_wr32(priv, 0x418c6c, 0x1); + nv_wr32(priv, 0x418c80, 0x20200004); + nv_wr32(priv, 0x418c8c, 0x1); + nv_wr32(priv, 0x419000, 0x780); + nv_wr32(priv, 0x419004, 0x0); + nv_wr32(priv, 0x419008, 0x0); + nv_wr32(priv, 0x419014, 0x4); +} + +static void +nve0_graph_generate_tpc(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x419848, 0x0); + nv_wr32(priv, 0x419864, 0x129); + nv_wr32(priv, 0x419888, 0x0); + nv_wr32(priv, 0x419a00, 0xf0); + nv_wr32(priv, 0x419a04, 0x1); + nv_wr32(priv, 0x419a08, 
0x21); + nv_wr32(priv, 0x419a0c, 0x20000); + nv_wr32(priv, 0x419a10, 0x0); + nv_wr32(priv, 0x419a14, 0x200); + nv_wr32(priv, 0x419a1c, 0xc000); + nv_wr32(priv, 0x419a20, 0x800); + nv_wr32(priv, 0x419a30, 0x1); + nv_wr32(priv, 0x419ac4, 0x37f440); + nv_wr32(priv, 0x419c00, 0xa); + nv_wr32(priv, 0x419c04, 0x80000006); + nv_wr32(priv, 0x419c08, 0x2); + nv_wr32(priv, 0x419c20, 0x0); + nv_wr32(priv, 0x419c24, 0x84210); + nv_wr32(priv, 0x419c28, 0x3efbefbe); + nv_wr32(priv, 0x419ce8, 0x0); + nv_wr32(priv, 0x419cf4, 0x3203); + nv_wr32(priv, 0x419e04, 0x0); + nv_wr32(priv, 0x419e08, 0x0); + nv_wr32(priv, 0x419e0c, 0x0); + nv_wr32(priv, 0x419e10, 0x402); + nv_wr32(priv, 0x419e44, 0x13eff2); + nv_wr32(priv, 0x419e48, 0x0); + nv_wr32(priv, 0x419e4c, 0x7f); + nv_wr32(priv, 0x419e50, 0x0); + nv_wr32(priv, 0x419e54, 0x0); + nv_wr32(priv, 0x419e58, 0x0); + nv_wr32(priv, 0x419e5c, 0x0); + nv_wr32(priv, 0x419e60, 0x0); + nv_wr32(priv, 0x419e64, 0x0); + nv_wr32(priv, 0x419e68, 0x0); + nv_wr32(priv, 0x419e6c, 0x0); + nv_wr32(priv, 0x419e70, 0x0); + nv_wr32(priv, 0x419e74, 0x0); + nv_wr32(priv, 0x419e78, 0x0); + nv_wr32(priv, 0x419e7c, 0x0); + nv_wr32(priv, 0x419e80, 0x0); + nv_wr32(priv, 0x419e84, 0x0); + nv_wr32(priv, 0x419e88, 0x0); + nv_wr32(priv, 0x419e8c, 0x0); + nv_wr32(priv, 0x419e90, 0x0); + nv_wr32(priv, 0x419e94, 0x0); + nv_wr32(priv, 0x419e98, 0x0); + nv_wr32(priv, 0x419eac, 0x1fcf); + nv_wr32(priv, 0x419eb0, 0xd3f); + nv_wr32(priv, 0x419ec8, 0x1304f); + nv_wr32(priv, 0x419f30, 0x0); + nv_wr32(priv, 0x419f34, 0x0); + nv_wr32(priv, 0x419f38, 0x0); + nv_wr32(priv, 0x419f3c, 0x0); + nv_wr32(priv, 0x419f40, 0x0); + nv_wr32(priv, 0x419f44, 0x0); + nv_wr32(priv, 0x419f48, 0x0); + nv_wr32(priv, 0x419f4c, 0x0); + nv_wr32(priv, 0x419f58, 0x0); + nv_wr32(priv, 0x419f78, 0xb); +} + +static void +nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x41be24, 0x6); + nv_wr32(priv, 0x41bec0, 0x12180000); + nv_wr32(priv, 0x41bec4, 0x37f7f); + nv_wr32(priv, 0x41bee4, 0x6480430); + nv_wr32(priv, 0x41bf00, 0xa418820); + nv_wr32(priv, 0x41bf04, 0x62080e6); + nv_wr32(priv, 0x41bf08, 0x20398a4); + nv_wr32(priv, 0x41bf0c, 0xe629062); + nv_wr32(priv, 0x41bf10, 0xa418820); + nv_wr32(priv, 0x41bf14, 0xe6); + nv_wr32(priv, 0x41bfd0, 0x900103); + nv_wr32(priv, 0x41bfe0, 0x400001); + nv_wr32(priv, 0x41bfe4, 0x0); +} + +int +nve0_grctx_generate(struct nvc0_graph_priv *priv) +{ + struct nvc0_grctx info; + int ret, i, gpc, tpc, id; + u32 data[6] = {}, data2[2] = {}, tmp; + u32 tpc_set = 0, tpc_mask = 0; + u32 magic[GPC_MAX][2], offset; + u8 tpcnr[GPC_MAX], a, b; + u8 shift, ntpcv; + + ret = nvc0_grctx_init(priv, &info); + if (ret) + return ret; + + nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nv_wr32(priv, 0x400204, 0x00000000); + nv_wr32(priv, 0x400208, 0x00000000); + + nve0_graph_generate_unk40xx(priv); + nve0_graph_generate_unk44xx(priv); + nve0_graph_generate_unk46xx(priv); + nve0_graph_generate_unk47xx(priv); + nve0_graph_generate_unk58xx(priv); + nve0_graph_generate_unk60xx(priv); + nve0_graph_generate_unk64xx(priv); + nve0_graph_generate_unk70xx(priv); + nve0_graph_generate_unk78xx(priv); + nve0_graph_generate_unk80xx(priv); + nve0_graph_generate_unk88xx(priv); + nve0_graph_generate_gpc(priv); + nve0_graph_generate_tpc(priv); + nve0_graph_generate_tpcunk(priv); + + nv_wr32(priv, 0x404154, 0x0); + + mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); + mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); + mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); + 
mmio_list(0x40800c, 0x00000000, 8, 1); + mmio_list(0x408010, 0x80000000, 0, 0); + mmio_list(0x419004, 0x00000000, 8, 1); + mmio_list(0x419008, 0x00000000, 0, 0); + mmio_list(0x4064cc, 0x80000000, 0, 0); + mmio_list(0x408004, 0x00000000, 8, 0); + mmio_list(0x408008, 0x80000030, 0, 0); + mmio_list(0x418808, 0x00000000, 8, 0); + mmio_list(0x41880c, 0x80000030, 0, 0); + mmio_list(0x4064c8, 0x01800600, 0, 0); + mmio_list(0x418810, 0x80000000, 12, 2); + mmio_list(0x419848, 0x10000000, 12, 2); + mmio_list(0x405830, 0x02180648, 0, 0); + mmio_list(0x4064c4, 0x0192ffff, 0, 0); + for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { + u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; + u16 magic1 = 0x0648 * priv->tpc_nr[gpc]; + magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; + magic[gpc][1] = 0x00000000 | (magic1 << 16); + offset += 0x0324 * priv->tpc_nr[gpc]; + } + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); + mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); + offset += 0x07ff * priv->tpc_nr[gpc]; + } + mmio_list(0x17e91c, 0x06060609, 0, 0); + mmio_list(0x17e920, 0x00090a05, 0, 0); + + nv_wr32(priv, 0x418c6c, 0x1); + nv_wr32(priv, 0x41980c, 0x10); + nv_wr32(priv, 0x41be08, 0x4); + nv_wr32(priv, 0x4064c0, 0x801a00f0); + nv_wr32(priv, 0x405800, 0xf8000bf); + nv_wr32(priv, 0x419c00, 0xa); + + for (tpc = 0, id = 0; tpc < 4; tpc++) { + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + if (tpc < priv->tpc_nr[gpc]) { + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id); + nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++); + } + + nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]); + nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]); + } + } + + tmp = 0; + for (i = 0; i < priv->gpc_nr; i++) + tmp |= priv->tpc_nr[i] << (i * 4); + nv_wr32(priv, 0x406028, tmp); + nv_wr32(priv, 0x405870, tmp); + + nv_wr32(priv, 0x40602c, 0x0); + nv_wr32(priv, 0x405874, 0x0); + nv_wr32(priv, 0x406030, 0x0); + nv_wr32(priv, 0x405878, 0x0); + nv_wr32(priv, 0x406034, 0x0); + nv_wr32(priv, 0x40587c, 0x0); + + /* calculate first set of magics */ + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + + gpc = -1; + for (tpc = 0; tpc < priv->tpc_total; tpc++) { + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpcnr[gpc]--; + + data[tpc / 6] |= gpc << ((tpc % 6) * 5); + } + + for (; tpc < 32; tpc++) + data[tpc / 6] |= 7 << ((tpc % 6) * 5); + + /* and the second... 
*/ + shift = 0; + ntpcv = priv->tpc_total; + while (!(ntpcv & (1 << 4))) { + ntpcv <<= 1; + shift++; + } + + data2[0] = ntpcv << 16; + data2[0] |= shift << 21; + data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24); + data2[0] |= priv->tpc_total << 8; + data2[0] |= priv->magic_not_rop_nr; + for (i = 1; i < 7; i++) + data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); + + /* and write it all the various parts of PGRAPH */ + nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr); + for (i = 0; i < 6; i++) + nv_wr32(priv, 0x418b08 + (i * 4), data[i]); + + nv_wr32(priv, 0x41bfd0, data2[0]); + nv_wr32(priv, 0x41bfe4, data2[1]); + for (i = 0; i < 6; i++) + nv_wr32(priv, 0x41bf00 + (i * 4), data[i]); + + nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr); + for (i = 0; i < 6; i++) + nv_wr32(priv, 0x40780c + (i * 4), data[i]); + + + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) + tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8); + + for (i = 0, gpc = -1, b = -1; i < 32; i++) { + a = (i * (priv->tpc_total - 1)) / 32; + if (a != b) { + b = a; + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; + + tpc_set |= 1 << ((gpc * 8) + tpc); + } + + nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set); + nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask); + } + + for (i = 0; i < 8; i++) + nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000); + + nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr); + if (priv->gpc_nr == 1) { + nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]); + nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]); + } else { + nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr); + nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr); + } + nv_mask(priv, 0x419f78, 0x00000001, 0x00000000); + + nve0_grctx_generate_icmd(priv); + nve0_grctx_generate_a097(priv); + nve0_grctx_generate_902d(priv); + + nv_mask(priv, 0x000260, 0x00000001, 0x00000001); + nv_wr32(priv, 0x418800, 0x7026860a); //XXX + nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX + return nvc0_grctx_fini(&info); +} diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc new file mode 100644 index 0000000..f7055af --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc @@ -0,0 +1,544 @@ +/* fuc microcode for nvc0 PGRAPH/GPC + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
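For orientation, the layout words written to 0x418b08+, 0x41bf00+ and 0x40780c+ in nve0_grctx_generate() above are a straight bit-packing of the round-robin TPC distribution: each TPC's GPC index occupies a 5-bit field, six fields per 32-bit word, and the unused entries are padded with 7. The following is a minimal host-side sketch of that packing only, for illustration; the gpc_nr/tpc_nr topology values are invented, and everything else mirrors the loop above rather than any other part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* example topology, not taken from any real board */
	const int gpc_nr = 4;
	const uint8_t tpc_nr[4] = { 2, 2, 2, 2 };
	uint8_t tpcnr[4];
	uint32_t data[6] = { 0 };
	int tpc_total = 0, gpc = -1, tpc, i;

	for (i = 0; i < gpc_nr; i++)
		tpc_total += tpc_nr[i];

	/* deal TPCs out round-robin across GPCs: 5 bits per entry,
	 * six entries per 32-bit word, same packing as the loop above */
	memcpy(tpcnr, tpc_nr, sizeof(tpcnr));
	for (tpc = 0; tpc < tpc_total; tpc++) {
		do {
			gpc = (gpc + 1) % gpc_nr;
		} while (!tpcnr[gpc]);
		tpcnr[gpc]--;
		data[tpc / 6] |= (uint32_t)gpc << ((tpc % 6) * 5);
	}
	/* pad the remaining entries with 7, as the code above does */
	for (; tpc < 32; tpc++)
		data[tpc / 6] |= 7u << ((tpc % 6) * 5);

	for (i = 0; i < 6; i++)
		printf("data[%d] = 0x%08x\n", i, data[i]);
	return 0;
}

Running the sketch prints the six words that would be handed to the GPC/TPC broadcast registers for the assumed 4x2 topology.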
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +/* To build: + * m4 gpcnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnvc0.fuc.h + */ + +/* TODO + * - bracket certain functions with scratch writes, useful for debugging + * - watchdog timer around ctx operations + */ + +.section #nvc0_grgpc_data +include(`nvc0.fuc') +gpc_id: .b32 0 +gpc_mmio_list_head: .b32 0 +gpc_mmio_list_tail: .b32 0 + +tpc_count: .b32 0 +tpc_mask: .b32 0 +tpc_mmio_list_head: .b32 0 +tpc_mmio_list_tail: .b32 0 + +cmd_queue: queue_init + +// chipset descriptions +chipsets: +.b8 0xc0 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc0_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvc0_tpc_mmio_tail +.b8 0xc1 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc1_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvc1_tpc_mmio_tail +.b8 0xc3 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc0_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvc3_tpc_mmio_tail +.b8 0xc4 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc0_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvc3_tpc_mmio_tail +.b8 0xc8 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc0_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvc0_tpc_mmio_tail +.b8 0xce 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc0_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvc3_tpc_mmio_tail +.b8 0xcf 0 0 0 +.b16 #nvc0_gpc_mmio_head +.b16 #nvc0_gpc_mmio_tail +.b16 #nvc0_tpc_mmio_head +.b16 #nvcf_tpc_mmio_tail +.b8 0xd9 0 0 0 +.b16 #nvd9_gpc_mmio_head +.b16 #nvd9_gpc_mmio_tail +.b16 #nvd9_tpc_mmio_head +.b16 #nvd9_tpc_mmio_tail +.b8 0xd7 0 0 0 +.b16 #nvd9_gpc_mmio_head +.b16 #nvd9_gpc_mmio_tail +.b16 #nvd9_tpc_mmio_head +.b16 #nvd9_tpc_mmio_tail +.b8 0 0 0 0 + +// GPC mmio lists +nvc0_gpc_mmio_head: +mmctx_data(0x000380, 1) +mmctx_data(0x000400, 6) +mmctx_data(0x000450, 9) +mmctx_data(0x000600, 1) +mmctx_data(0x000684, 1) +mmctx_data(0x000700, 5) +mmctx_data(0x000800, 1) +mmctx_data(0x000808, 3) +mmctx_data(0x000828, 1) +mmctx_data(0x000830, 1) +mmctx_data(0x0008d8, 1) +mmctx_data(0x0008e0, 1) +mmctx_data(0x0008e8, 6) +mmctx_data(0x00091c, 1) +mmctx_data(0x000924, 3) +mmctx_data(0x000b00, 1) +mmctx_data(0x000b08, 6) +mmctx_data(0x000bb8, 1) +mmctx_data(0x000c08, 1) +mmctx_data(0x000c10, 8) +mmctx_data(0x000c80, 1) +mmctx_data(0x000c8c, 1) +mmctx_data(0x001000, 3) +mmctx_data(0x001014, 1) +nvc0_gpc_mmio_tail: +mmctx_data(0x000c6c, 1); +nvc1_gpc_mmio_tail: + +nvd9_gpc_mmio_head: +mmctx_data(0x000380, 1) +mmctx_data(0x000400, 2) +mmctx_data(0x00040c, 3) +mmctx_data(0x000450, 9) +mmctx_data(0x000600, 1) +mmctx_data(0x000684, 1) +mmctx_data(0x000700, 5) +mmctx_data(0x000800, 1) +mmctx_data(0x000808, 3) +mmctx_data(0x000828, 1) +mmctx_data(0x000830, 1) +mmctx_data(0x0008d8, 1) +mmctx_data(0x0008e0, 1) +mmctx_data(0x0008e8, 6) +mmctx_data(0x00091c, 1) +mmctx_data(0x000924, 3) +mmctx_data(0x000b00, 1) +mmctx_data(0x000b08, 6) +mmctx_data(0x000bb8, 1) +mmctx_data(0x000c08, 1) +mmctx_data(0x000c10, 8) +mmctx_data(0x000c6c, 1) +mmctx_data(0x000c80, 1) +mmctx_data(0x000c8c, 1) +mmctx_data(0x001000, 3) +mmctx_data(0x001014, 1) +nvd9_gpc_mmio_tail: + +// TPC mmio lists +nvc0_tpc_mmio_head: +mmctx_data(0x000018, 1) +mmctx_data(0x00003c, 1) +mmctx_data(0x000048, 1) +mmctx_data(0x000064, 1) +mmctx_data(0x000088, 1) +mmctx_data(0x000200, 6) +mmctx_data(0x00021c, 2) +mmctx_data(0x000300, 6) 
+mmctx_data(0x0003d0, 1) +mmctx_data(0x0003e0, 2) +mmctx_data(0x000400, 3) +mmctx_data(0x000420, 1) +mmctx_data(0x0004b0, 1) +mmctx_data(0x0004e8, 1) +mmctx_data(0x0004f4, 1) +mmctx_data(0x000520, 2) +mmctx_data(0x000604, 4) +mmctx_data(0x000644, 20) +mmctx_data(0x000698, 1) +mmctx_data(0x000750, 2) +nvc0_tpc_mmio_tail: +mmctx_data(0x000758, 1) +mmctx_data(0x0002c4, 1) +mmctx_data(0x0006e0, 1) +nvcf_tpc_mmio_tail: +mmctx_data(0x0004bc, 1) +nvc3_tpc_mmio_tail: +mmctx_data(0x000544, 1) +nvc1_tpc_mmio_tail: + +nvd9_tpc_mmio_head: +mmctx_data(0x000018, 1) +mmctx_data(0x00003c, 1) +mmctx_data(0x000048, 1) +mmctx_data(0x000064, 1) +mmctx_data(0x000088, 1) +mmctx_data(0x000200, 6) +mmctx_data(0x00021c, 2) +mmctx_data(0x0002c4, 1) +mmctx_data(0x000300, 6) +mmctx_data(0x0003d0, 1) +mmctx_data(0x0003e0, 2) +mmctx_data(0x000400, 3) +mmctx_data(0x000420, 3) +mmctx_data(0x0004b0, 1) +mmctx_data(0x0004e8, 1) +mmctx_data(0x0004f4, 1) +mmctx_data(0x000520, 2) +mmctx_data(0x000544, 1) +mmctx_data(0x000604, 4) +mmctx_data(0x000644, 20) +mmctx_data(0x000698, 1) +mmctx_data(0x0006e0, 1) +mmctx_data(0x000750, 3) +nvd9_tpc_mmio_tail: + +.section #nvc0_grgpc_code +bra #init +define(`include_code') +include(`nvc0.fuc') + +// reports an exception to the host +// +// In: $r15 error code (see nvc0.fuc) +// +error: + push $r14 + mov $r14 -0x67ec // 0x9814 + sethi $r14 0x400000 + call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code + add b32 $r14 0x41c + mov $r15 1 + call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET + pop $r14 + ret + +// GPC fuc initialisation, executed by triggering ucode start, will +// fall through to main loop after completion. +// +// Input: +// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) +// CC_SCRATCH[1]: context base +// +// Output: +// CC_SCRATCH[0]: +// 31:31: set to signal completion +// CC_SCRATCH[1]: +// 31:0: GPC context size +// +init: + clear b32 $r0 + mov $sp $r0 + + // enable fifo access + mov $r1 0x1200 + mov $r2 2 + iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE + + // setup i0 handler, and route all interrupts to it + mov $r1 #ih + mov $iv0 $r1 + mov $r1 0x400 + iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH + + // enable fifo interrupt + mov $r2 4 + iowr I[$r1 + 0x000] $r2 // INTR_EN_SET + + // enable interrupts + bset $flags ie0 + + // figure out which GPC we are, and how many TPCs we have + mov $r1 0x608 + shl b32 $r1 6 + iord $r2 I[$r1 + 0x000] // UNITS + mov $r3 1 + and $r2 0x1f + shl b32 $r3 $r2 + sub b32 $r3 1 + st b32 D[$r0 + #tpc_count] $r2 + st b32 D[$r0 + #tpc_mask] $r3 + add b32 $r1 0x400 + iord $r2 I[$r1 + 0x000] // MYINDEX + st b32 D[$r0 + #gpc_id] $r2 + + // find context data for this chipset + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] + mov $r1 #chipsets - 12 + init_find_chipset: + add b32 $r1 12 + ld b32 $r3 D[$r1 + 0x00] + cmpu b32 $r3 $r2 + bra e #init_context + cmpu b32 $r3 0 + bra ne #init_find_chipset + // unknown chipset + ret + + // initialise context base, and size tracking + init_context: + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base + clear b32 $r3 // track GPC context size here + + // set mmctx base addresses now so we don't have to do it later, + // they don't currently ever change + mov $r4 0x700 + shl b32 $r4 6 + shr b32 $r5 $r2 8 + iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE + iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE + + // calculate GPC mmio context size, store the chipset-specific + // mmio list pointers somewhere we can get at them later without + // re-parsing the 
chipset list + clear b32 $r14 + clear b32 $r15 + ld b16 $r14 D[$r1 + 4] + ld b16 $r15 D[$r1 + 6] + st b16 D[$r0 + #gpc_mmio_list_head] $r14 + st b16 D[$r0 + #gpc_mmio_list_tail] $r15 + call #mmctx_size + add b32 $r2 $r15 + add b32 $r3 $r15 + + // calculate per-TPC mmio context size, store the list pointers + ld b16 $r14 D[$r1 + 8] + ld b16 $r15 D[$r1 + 10] + st b16 D[$r0 + #tpc_mmio_list_head] $r14 + st b16 D[$r0 + #tpc_mmio_list_tail] $r15 + call #mmctx_size + ld b32 $r14 D[$r0 + #tpc_count] + mulu $r14 $r15 + add b32 $r2 $r14 + add b32 $r3 $r14 + + // round up base/size to 256 byte boundary (for strand SWBASE) + add b32 $r4 0x1300 + shr b32 $r3 2 + iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!? + shr b32 $r2 8 + shr b32 $r3 6 + add b32 $r2 1 + add b32 $r3 1 + shl b32 $r2 8 + shl b32 $r3 8 + + // calculate size of strand context data + mov b32 $r15 $r2 + call #strand_ctx_init + add b32 $r3 $r15 + + // save context size, and tell HUB we're done + mov $r1 0x800 + shl b32 $r1 6 + iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size + add b32 $r1 0x800 + clear b32 $r2 + bset $r2 31 + iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 + +// Main program loop, very simple, sleeps until woken up by the interrupt +// handler, pulls a command from the queue and executes its handler +// +main: + bset $flags $p0 + sleep $p0 + mov $r13 #cmd_queue + call #queue_get + bra $p1 #main + + // 0x0000-0x0003 are all context transfers + cmpu b32 $r14 0x04 + bra nc #main_not_ctx_xfer + // fetch $flags and mask off $p1/$p2 + mov $r1 $flags + mov $r2 0x0006 + not b32 $r2 + and $r1 $r2 + // set $p1/$p2 according to transfer type + shl b32 $r14 1 + or $r1 $r14 + mov $flags $r1 + // transfer context data + call #ctx_xfer + bra #main + + main_not_ctx_xfer: + shl b32 $r15 $r14 16 + or $r15 E_BAD_COMMAND + call #error + bra #main + +// interrupt handler +ih: + push $r8 + mov $r8 $flags + push $r8 + push $r9 + push $r10 + push $r11 + push $r13 + push $r14 + push $r15 + + // incoming fifo command? + iord $r10 I[$r0 + 0x200] // INTR + and $r11 $r10 0x00000004 + bra e #ih_no_fifo + // queue incoming fifo command for later processing + mov $r11 0x1900 + mov $r13 #cmd_queue + iord $r14 I[$r11 + 0x100] // FIFO_CMD + iord $r15 I[$r11 + 0x000] // FIFO_DATA + call #queue_put + add b32 $r11 0x400 + mov $r14 1 + iowr I[$r11 + 0x000] $r14 // FIFO_ACK + + // ack, and wake up main() + ih_no_fifo: + iowr I[$r0 + 0x100] $r10 // INTR_ACK + + pop $r15 + pop $r14 + pop $r13 + pop $r11 + pop $r10 + pop $r9 + pop $r8 + mov $flags $r8 + pop $r8 + bclr $flags $p0 + iret + +// Set this GPC's bit in HUB_BAR, used to signal completion of various +// activities to the HUB fuc +// +hub_barrier_done: + mov $r15 1 + ld b32 $r14 D[$r0 + #gpc_id] + shl b32 $r15 $r14 + mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET + sethi $r14 0x400000 + call #nv_wr32 + ret + +// Disables various things, waits a bit, and re-enables them.. +// +// Not sure how exactly this helps, perhaps "ENABLE" is not such a +// good description for the bits we turn off? Anyways, without this, +// funny things happen. 
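Two small pieces of arithmetic recur in the init path above: tpc_mask is derived as (1 << tpc_count) - 1 from the low bits of the UNITS register, and the accumulated context base/size are rounded up to a 256-byte boundary before the strand state is sized. The same arithmetic in standalone C, as an illustration only; the input values below are invented, and register I/O, mmctx_size and strand_ctx_init are outside the scope of the sketch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented example inputs, standing in for the UNITS register and
	 * the CC_SCRATCH[1] context base handed to the GPC fuc */
	uint32_t units    = 0x00000042;	/* low 5 bits = TPC count */
	uint32_t ctx_base = 0x00012345;
	uint32_t ctx_size = 0x00000d63;	/* accumulated mmio context size */

	uint32_t tpc_count = units & 0x1f;
	uint32_t tpc_mask  = (1u << tpc_count) - 1;

	/* round base and size up to the next 256-byte boundary, mirroring
	 * the shr/add/shl sequence in the microcode above (which always
	 * advances to the next boundary, even when already aligned) */
	ctx_base = ((ctx_base >> 8) + 1) << 8;
	ctx_size = ((ctx_size >> 8) + 1) << 8;

	printf("tpc_count=%u tpc_mask=0x%02x base=0x%08x size=0x%08x\n",
	       tpc_count, tpc_mask, ctx_base, ctx_size);
	return 0;
}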
+// +ctx_redswitch: + mov $r14 0x614 + shl b32 $r14 6 + mov $r15 0x020 + iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER + mov $r15 8 + ctx_redswitch_delay: + sub b32 $r15 1 + bra ne #ctx_redswitch_delay + mov $r15 0xa20 + iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER + ret + +// Transfer GPC context data between GPU and storage area +// +// In: $r15 context base address +// $p1 clear on save, set on load +// $p2 set if opposite direction done/will be done, so: +// on save it means: "a load will follow this save" +// on load it means: "a save preceeded this load" +// +ctx_xfer: + // set context base address + mov $r1 0xa04 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r15// MEM_BASE + bra not $p1 #ctx_xfer_not_load + call #ctx_redswitch + ctx_xfer_not_load: + + // strands + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xc + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c + call #strand_wait + mov $r2 0x47fc + sethi $r2 0x20000 + iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 + xbit $r2 $flags $p1 + add b32 $r2 3 + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) + + // mmio context + xbit $r10 $flags $p1 // direction + or $r10 2 // first + mov $r11 0x0000 + sethi $r11 0x500000 + ld b32 $r12 D[$r0 + #gpc_id] + shl b32 $r12 15 + add b32 $r11 $r12 // base = NV_PGRAPH_GPCn + ld b32 $r12 D[$r0 + #gpc_mmio_list_head] + ld b32 $r13 D[$r0 + #gpc_mmio_list_tail] + mov $r14 0 // not multi + call #mmctx_xfer + + // per-TPC mmio context + xbit $r10 $flags $p1 // direction + or $r10 4 // last + mov $r11 0x4000 + sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 + ld b32 $r12 D[$r0 + #gpc_id] + shl b32 $r12 15 + add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 + ld b32 $r12 D[$r0 + #tpc_mmio_list_head] + ld b32 $r13 D[$r0 + #tpc_mmio_list_tail] + ld b32 $r15 D[$r0 + #tpc_mask] + mov $r14 0x800 // stride = 0x800 + call #mmctx_xfer + + // wait for strands to finish + call #strand_wait + + // if load, or a save without a load following, do some + // unknown stuff that's done after finishing a block of + // strand commands + bra $p1 #ctx_xfer_post + bra not $p2 #ctx_xfer_done + ctx_xfer_post: + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xd + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d + call #strand_wait + + // mark completion in HUB's barrier + ctx_xfer_done: + call #hub_barrier_done + ret + +.align 256 diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h new file mode 100644 index 0000000..96050dd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h @@ -0,0 +1,604 @@ +uint32_t nvc0_grgpc_data[] = { +/* 0x0000: gpc_id */ + 0x00000000, +/* 0x0004: gpc_mmio_list_head */ + 0x00000000, +/* 0x0008: gpc_mmio_list_tail */ + 0x00000000, +/* 0x000c: tpc_count */ + 0x00000000, +/* 0x0010: tpc_mask */ + 0x00000000, +/* 0x0014: tpc_mmio_list_head */ + 0x00000000, +/* 0x0018: tpc_mmio_list_tail */ + 0x00000000, +/* 0x001c: cmd_queue */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0064: chipsets */ + 0x000000c0, + 0x012800c8, + 0x01e40194, + 0x000000c1, + 0x012c00c8, + 0x01f80194, + 0x000000c3, + 0x012800c8, + 0x01f40194, + 0x000000c4, + 0x012800c8, + 0x01f40194, + 0x000000c8, + 0x012800c8, + 0x01e40194, + 0x000000ce, + 0x012800c8, + 0x01f40194, + 0x000000cf, + 0x012800c8, + 0x01f00194, + 
0x000000d9, + 0x0194012c, + 0x025401f8, + 0x00000000, +/* 0x00c8: nvc0_gpc_mmio_head */ + 0x00000380, + 0x14000400, + 0x20000450, + 0x00000600, + 0x00000684, + 0x10000700, + 0x00000800, + 0x08000808, + 0x00000828, + 0x00000830, + 0x000008d8, + 0x000008e0, + 0x140008e8, + 0x0000091c, + 0x08000924, + 0x00000b00, + 0x14000b08, + 0x00000bb8, + 0x00000c08, + 0x1c000c10, + 0x00000c80, + 0x00000c8c, + 0x08001000, + 0x00001014, +/* 0x0128: nvc0_gpc_mmio_tail */ + 0x00000c6c, +/* 0x012c: nvc1_gpc_mmio_tail */ +/* 0x012c: nvd9_gpc_mmio_head */ + 0x00000380, + 0x04000400, + 0x0800040c, + 0x20000450, + 0x00000600, + 0x00000684, + 0x10000700, + 0x00000800, + 0x08000808, + 0x00000828, + 0x00000830, + 0x000008d8, + 0x000008e0, + 0x140008e8, + 0x0000091c, + 0x08000924, + 0x00000b00, + 0x14000b08, + 0x00000bb8, + 0x00000c08, + 0x1c000c10, + 0x00000c6c, + 0x00000c80, + 0x00000c8c, + 0x08001000, + 0x00001014, +/* 0x0194: nvd9_gpc_mmio_tail */ +/* 0x0194: nvc0_tpc_mmio_head */ + 0x00000018, + 0x0000003c, + 0x00000048, + 0x00000064, + 0x00000088, + 0x14000200, + 0x0400021c, + 0x14000300, + 0x000003d0, + 0x040003e0, + 0x08000400, + 0x00000420, + 0x000004b0, + 0x000004e8, + 0x000004f4, + 0x04000520, + 0x0c000604, + 0x4c000644, + 0x00000698, + 0x04000750, +/* 0x01e4: nvc0_tpc_mmio_tail */ + 0x00000758, + 0x000002c4, + 0x000006e0, +/* 0x01f0: nvcf_tpc_mmio_tail */ + 0x000004bc, +/* 0x01f4: nvc3_tpc_mmio_tail */ + 0x00000544, +/* 0x01f8: nvc1_tpc_mmio_tail */ +/* 0x01f8: nvd9_tpc_mmio_head */ + 0x00000018, + 0x0000003c, + 0x00000048, + 0x00000064, + 0x00000088, + 0x14000200, + 0x0400021c, + 0x000002c4, + 0x14000300, + 0x000003d0, + 0x040003e0, + 0x08000400, + 0x08000420, + 0x000004b0, + 0x000004e8, + 0x000004f4, + 0x04000520, + 0x00000544, + 0x0c000604, + 0x4c000644, + 0x00000698, + 0x000006e0, + 0x08000750, +}; + +uint32_t nvc0_grgpc_code[] = { + 0x03060ef5, +/* 0x0004: queue_put */ + 0x9800d898, + 0x86f001d9, + 0x0489b808, + 0xf00c1bf4, + 0x21f502f7, + 0x00f802ec, +/* 0x001c: queue_put_next */ + 0xb60798c4, + 0x8dbb0384, + 0x0880b600, + 0x80008e80, + 0x90b6018f, + 0x0f94f001, + 0xf801d980, +/* 0x0039: queue_get */ + 0x0131f400, + 0x9800d898, + 0x89b801d9, + 0x210bf404, + 0xb60789c4, + 0x9dbb0394, + 0x0890b600, + 0x98009e98, + 0x80b6019f, + 0x0f84f001, + 0xf400d880, +/* 0x0066: queue_get_done */ + 0x00f80132, +/* 0x0068: nv_rd32 */ + 0x0728b7f1, + 0xb906b4b6, + 0xc9f002ec, + 0x00bcd01f, +/* 0x0078: nv_rd32_wait */ + 0xc800bccf, + 0x1bf41fcc, + 0x06a7f0fa, + 0x010321f5, + 0xf840bfcf, +/* 0x008d: nv_wr32 */ + 0x28b7f100, + 0x06b4b607, + 0xb980bfd0, + 0xc9f002ec, + 0x1ec9f01f, +/* 0x00a3: nv_wr32_wait */ + 0xcf00bcd0, + 0xccc800bc, + 0xfa1bf41f, +/* 0x00ae: watchdog_reset */ + 0x87f100f8, + 0x84b60430, + 0x1ff9f006, + 0xf8008fd0, +/* 0x00bd: watchdog_clear */ + 0x3087f100, + 0x0684b604, + 0xf80080d0, +/* 0x00c9: wait_donez */ + 0x3c87f100, + 0x0684b608, + 0x99f094bd, + 0x0089d000, + 0x081887f1, + 0xd00684b6, +/* 0x00e2: wait_done_wait_donez */ + 0x87f1008a, + 0x84b60400, + 0x0088cf06, + 0xf4888aff, + 0x87f1f31b, + 0x84b6085c, + 0xf094bd06, + 0x89d00099, +/* 0x0103: wait_doneo */ + 0xf100f800, + 0xb6083c87, + 0x94bd0684, + 0xd00099f0, + 0x87f10089, + 0x84b60818, + 0x008ad006, +/* 0x011c: wait_done_wait_doneo */ + 0x040087f1, + 0xcf0684b6, + 0x8aff0088, + 0xf30bf488, + 0x085c87f1, + 0xbd0684b6, + 0x0099f094, + 0xf80089d0, +/* 0x013d: mmctx_size */ +/* 0x013f: nv_mmctx_size_loop */ + 0x9894bd00, + 0x85b600e8, + 0x0180b61a, + 0xbb0284b6, + 0xe0b60098, + 0x04efb804, + 0xb9eb1bf4, + 0x00f8029f, +/* 0x015c: mmctx_xfer */ + 
0x083c87f1, + 0xbd0684b6, + 0x0199f094, + 0xf10089d0, + 0xb6071087, + 0x94bd0684, + 0xf405bbfd, + 0x8bd0090b, + 0x0099f000, +/* 0x0180: mmctx_base_disabled */ + 0xf405eefd, + 0x8ed00c0b, + 0xc08fd080, +/* 0x018f: mmctx_multi_disabled */ + 0xb70199f0, + 0xc8010080, + 0xb4b600ab, + 0x0cb9f010, + 0xb601aec8, + 0xbefd11e4, + 0x008bd005, +/* 0x01a8: mmctx_exec_loop */ +/* 0x01a8: mmctx_wait_free */ + 0xf0008ecf, + 0x0bf41fe4, + 0x00ce98fa, + 0xd005e9fd, + 0xc0b6c08e, + 0x04cdb804, + 0xc8e81bf4, + 0x1bf402ab, +/* 0x01c9: mmctx_fini_wait */ + 0x008bcf18, + 0xb01fb4f0, + 0x1bf410b4, + 0x02a7f0f7, + 0xf4c921f4, +/* 0x01de: mmctx_stop */ + 0xabc81b0e, + 0x10b4b600, + 0xf00cb9f0, + 0x8bd012b9, +/* 0x01ed: mmctx_stop_wait */ + 0x008bcf00, + 0xf412bbc8, +/* 0x01f6: mmctx_done */ + 0x87f1fa1b, + 0x84b6085c, + 0xf094bd06, + 0x89d00199, +/* 0x0207: strand_wait */ + 0xf900f800, + 0x02a7f0a0, + 0xfcc921f4, +/* 0x0213: strand_pre */ + 0xf100f8a0, + 0xf04afc87, + 0x97f00283, + 0x0089d00c, + 0x020721f5, +/* 0x0226: strand_post */ + 0x87f100f8, + 0x83f04afc, + 0x0d97f002, + 0xf50089d0, + 0xf8020721, +/* 0x0239: strand_set */ + 0xfca7f100, + 0x02a3f04f, + 0x0500aba2, + 0xd00fc7f0, + 0xc7f000ac, + 0x00bcd00b, + 0x020721f5, + 0xf000aed0, + 0xbcd00ac7, + 0x0721f500, +/* 0x0263: strand_ctx_init */ + 0xf100f802, + 0xb6083c87, + 0x94bd0684, + 0xd00399f0, + 0x21f50089, + 0xe7f00213, + 0x3921f503, + 0xfca7f102, + 0x02a3f046, + 0x0400aba0, + 0xf040a0d0, + 0xbcd001c7, + 0x0721f500, + 0x010c9202, + 0xf000acd0, + 0xbcd002c7, + 0x0721f500, + 0x2621f502, + 0x8087f102, + 0x0684b608, + 0xb70089cf, + 0x95220080, +/* 0x02ba: ctx_init_strand_loop */ + 0x8ed008fe, + 0x408ed000, + 0xb6808acf, + 0xa0b606a5, + 0x00eabb01, + 0xb60480b6, + 0x1bf40192, + 0x08e4b6e8, + 0xf1f2efbc, + 0xb6085c87, + 0x94bd0684, + 0xd00399f0, + 0x00f80089, +/* 0x02ec: error */ + 0xe7f1e0f9, + 0xe3f09814, + 0x8d21f440, + 0x041ce0b7, + 0xf401f7f0, + 0xe0fc8d21, +/* 0x0306: init */ + 0x04bd00f8, + 0xf10004fe, + 0xf0120017, + 0x12d00227, + 0x3e17f100, + 0x0010fe04, + 0x040017f1, + 0xf0c010d0, + 0x12d00427, + 0x1031f400, + 0x060817f1, + 0xcf0614b6, + 0x37f00012, + 0x1f24f001, + 0xb60432bb, + 0x02800132, + 0x04038003, + 0x040010b7, + 0x800012cf, + 0x27f10002, + 0x24b60800, + 0x0022cf06, +/* 0x035f: init_find_chipset */ + 0xb65817f0, + 0x13980c10, + 0x0432b800, + 0xb00b0bf4, + 0x1bf40034, +/* 0x0373: init_context */ + 0xf100f8f1, + 0xb6080027, + 0x22cf0624, + 0xf134bd40, + 0xb6070047, + 0x25950644, + 0x0045d008, + 0xbd4045d0, + 0x58f4bde4, + 0x1f58021e, + 0x020e4003, + 0xf5040f40, + 0xbb013d21, + 0x3fbb002f, + 0x041e5800, + 0x40051f58, + 0x0f400a0e, + 0x3d21f50c, + 0x030e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0040b700, + 0x0235b613, + 0xb60043d0, + 0x35b60825, + 0x0120b606, + 0xb60130b6, + 0x34b60824, + 0x022fb908, + 0x026321f5, + 0xf1003fbb, + 0xb6080017, + 0x13d00614, + 0x0010b740, + 0xf024bd08, + 0x12d01f29, +/* 0x0401: main */ + 0x0031f400, + 0xf00028f4, + 0x21f41cd7, + 0xf401f439, + 0xf404e4b0, + 0x81fe1e18, + 0x0627f001, + 0x12fd20bd, + 0x01e4b604, + 0xfe051efd, + 0x21f50018, + 0x0ef404c3, +/* 0x0431: main_not_ctx_xfer */ + 0x10ef94d3, + 0xf501f5f0, + 0xf402ec21, +/* 0x043e: ih */ + 0x80f9c60e, + 0xf90188fe, + 0xf990f980, + 0xf9b0f9a0, + 0xf9e0f9d0, + 0x800acff0, + 0xf404abc4, + 0xb7f11d0b, + 0xd7f01900, + 0x40becf1c, + 0xf400bfcf, + 0xb0b70421, + 0xe7f00400, + 0x00bed001, +/* 0x0474: ih_no_fifo */ + 0xfc400ad0, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x048f: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb00, + 
0x9418e7f1, + 0xf440e3f0, + 0x00f88d21, +/* 0x04a4: ctx_redswitch */ + 0x0614e7f1, + 0xf006e4b6, + 0xefd020f7, + 0x08f7f000, +/* 0x04b4: ctx_redswitch_delay */ + 0xf401f2b6, + 0xf7f1fd1b, + 0xefd00a20, +/* 0x04c3: ctx_xfer */ + 0xf100f800, + 0xb60a0417, + 0x1fd00614, + 0x0711f400, + 0x04a421f5, +/* 0x04d4: ctx_xfer_not_load */ + 0x4afc17f1, + 0xf00213f0, + 0x12d00c27, + 0x0721f500, + 0xfc27f102, + 0x0223f047, + 0xf00020d0, + 0x20b6012c, + 0x0012d003, + 0xf001acf0, + 0xb7f002a5, + 0x50b3f000, + 0xb6000c98, + 0xbcbb0fc4, + 0x010c9800, + 0xf0020d98, + 0x21f500e7, + 0xacf0015c, + 0x04a5f001, + 0x4000b7f1, + 0x9850b3f0, + 0xc4b6000c, + 0x00bcbb0f, + 0x98050c98, + 0x0f98060d, + 0x00e7f104, + 0x5c21f508, + 0x0721f501, + 0x0601f402, +/* 0x054b: ctx_xfer_post */ + 0xf11412f4, + 0xf04afc17, + 0x27f00213, + 0x0012d00d, + 0x020721f5, +/* 0x055c: ctx_xfer_done */ + 0x048f21f5, + 0x000000f8, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc new file mode 100644 index 0000000..62ab231 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc @@ -0,0 +1,456 @@ +/* fuc microcode for nve0 PGRAPH/GPC + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +/* To build: + * m4 nve0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grgpc.fuc.h + */ + +/* TODO + * - bracket certain functions with scratch writes, useful for debugging + * - watchdog timer around ctx operations + */ + +.section #nve0_grgpc_data +include(`nve0.fuc') +gpc_id: .b32 0 +gpc_mmio_list_head: .b32 0 +gpc_mmio_list_tail: .b32 0 + +tpc_count: .b32 0 +tpc_mask: .b32 0 +tpc_mmio_list_head: .b32 0 +tpc_mmio_list_tail: .b32 0 + +cmd_queue: queue_init + +// chipset descriptions +chipsets: +.b8 0xe4 0 0 0 +.b16 #nve4_gpc_mmio_head +.b16 #nve4_gpc_mmio_tail +.b16 #nve4_tpc_mmio_head +.b16 #nve4_tpc_mmio_tail +.b8 0xe7 0 0 0 +.b16 #nve4_gpc_mmio_head +.b16 #nve4_gpc_mmio_tail +.b16 #nve4_tpc_mmio_head +.b16 #nve4_tpc_mmio_tail +.b8 0xe6 0 0 0 +.b16 #nve4_gpc_mmio_head +.b16 #nve4_gpc_mmio_tail +.b16 #nve4_tpc_mmio_head +.b16 #nve4_tpc_mmio_tail +.b8 0 0 0 0 + +// GPC mmio lists +nve4_gpc_mmio_head: +mmctx_data(0x000380, 1) +mmctx_data(0x000400, 2) +mmctx_data(0x00040c, 3) +mmctx_data(0x000450, 9) +mmctx_data(0x000600, 1) +mmctx_data(0x000684, 1) +mmctx_data(0x000700, 5) +mmctx_data(0x000800, 1) +mmctx_data(0x000808, 3) +mmctx_data(0x000828, 1) +mmctx_data(0x000830, 1) +mmctx_data(0x0008d8, 1) +mmctx_data(0x0008e0, 1) +mmctx_data(0x0008e8, 6) +mmctx_data(0x00091c, 1) +mmctx_data(0x000924, 3) +mmctx_data(0x000b00, 1) +mmctx_data(0x000b08, 6) +mmctx_data(0x000bb8, 1) +mmctx_data(0x000c08, 1) +mmctx_data(0x000c10, 8) +mmctx_data(0x000c40, 1) +mmctx_data(0x000c6c, 1) +mmctx_data(0x000c80, 1) +mmctx_data(0x000c8c, 1) +mmctx_data(0x001000, 3) +mmctx_data(0x001014, 1) +mmctx_data(0x003024, 1) +mmctx_data(0x0030c0, 2) +mmctx_data(0x0030e4, 1) +mmctx_data(0x003100, 6) +mmctx_data(0x0031d0, 1) +mmctx_data(0x0031e0, 2) +nve4_gpc_mmio_tail: + +// TPC mmio lists +nve4_tpc_mmio_head: +mmctx_data(0x000048, 1) +mmctx_data(0x000064, 1) +mmctx_data(0x000088, 1) +mmctx_data(0x000200, 6) +mmctx_data(0x00021c, 2) +mmctx_data(0x000230, 1) +mmctx_data(0x0002c4, 1) +mmctx_data(0x000400, 3) +mmctx_data(0x000420, 3) +mmctx_data(0x0004e8, 1) +mmctx_data(0x0004f4, 1) +mmctx_data(0x000604, 4) +mmctx_data(0x000644, 22) +mmctx_data(0x0006ac, 2) +mmctx_data(0x0006c8, 1) +mmctx_data(0x000730, 8) +mmctx_data(0x000758, 1) +mmctx_data(0x000778, 1) +nve4_tpc_mmio_tail: + +.section #nve0_grgpc_code +bra #init +define(`include_code') +include(`nve0.fuc') + +// reports an exception to the host +// +// In: $r15 error code (see nve0.fuc) +// +error: + push $r14 + mov $r14 -0x67ec // 0x9814 + sethi $r14 0x400000 + call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code + add b32 $r14 0x41c + mov $r15 1 + call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET + pop $r14 + ret + +// GPC fuc initialisation, executed by triggering ucode start, will +// fall through to main loop after completion. +// +// Input: +// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... 
sigh) +// CC_SCRATCH[1]: context base +// +// Output: +// CC_SCRATCH[0]: +// 31:31: set to signal completion +// CC_SCRATCH[1]: +// 31:0: GPC context size +// +init: + clear b32 $r0 + mov $sp $r0 + + // enable fifo access + mov $r1 0x1200 + mov $r2 2 + iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE + + // setup i0 handler, and route all interrupts to it + mov $r1 #ih + mov $iv0 $r1 + mov $r1 0x400 + iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH + + // enable fifo interrupt + mov $r2 4 + iowr I[$r1 + 0x000] $r2 // INTR_EN_SET + + // enable interrupts + bset $flags ie0 + + // figure out which GPC we are, and how many TPCs we have + mov $r1 0x608 + shl b32 $r1 6 + iord $r2 I[$r1 + 0x000] // UNITS + mov $r3 1 + and $r2 0x1f + shl b32 $r3 $r2 + sub b32 $r3 1 + st b32 D[$r0 + #tpc_count] $r2 + st b32 D[$r0 + #tpc_mask] $r3 + add b32 $r1 0x400 + iord $r2 I[$r1 + 0x000] // MYINDEX + st b32 D[$r0 + #gpc_id] $r2 + + // find context data for this chipset + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] + mov $r1 #chipsets - 12 + init_find_chipset: + add b32 $r1 12 + ld b32 $r3 D[$r1 + 0x00] + cmpu b32 $r3 $r2 + bra e #init_context + cmpu b32 $r3 0 + bra ne #init_find_chipset + // unknown chipset + ret + + // initialise context base, and size tracking + init_context: + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base + clear b32 $r3 // track GPC context size here + + // set mmctx base addresses now so we don't have to do it later, + // they don't currently ever change + mov $r4 0x700 + shl b32 $r4 6 + shr b32 $r5 $r2 8 + iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE + iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE + + // calculate GPC mmio context size, store the chipset-specific + // mmio list pointers somewhere we can get at them later without + // re-parsing the chipset list + clear b32 $r14 + clear b32 $r15 + ld b16 $r14 D[$r1 + 4] + ld b16 $r15 D[$r1 + 6] + st b16 D[$r0 + #gpc_mmio_list_head] $r14 + st b16 D[$r0 + #gpc_mmio_list_tail] $r15 + call #mmctx_size + add b32 $r2 $r15 + add b32 $r3 $r15 + + // calculate per-TPC mmio context size, store the list pointers + ld b16 $r14 D[$r1 + 8] + ld b16 $r15 D[$r1 + 10] + st b16 D[$r0 + #tpc_mmio_list_head] $r14 + st b16 D[$r0 + #tpc_mmio_list_tail] $r15 + call #mmctx_size + ld b32 $r14 D[$r0 + #tpc_count] + mulu $r14 $r15 + add b32 $r2 $r14 + add b32 $r3 $r14 + + // round up base/size to 256 byte boundary (for strand SWBASE) + add b32 $r4 0x1300 + shr b32 $r3 2 + iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!? 
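+	// the shifts below look like a round-up to the next 256-byte
+	// multiple, x = ((x >> 8) + 1) << 8 ($r3 already lost two bits
+	// to the shr above, hence the shr 6 rather than 8)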
+ shr b32 $r2 8 + shr b32 $r3 6 + add b32 $r2 1 + add b32 $r3 1 + shl b32 $r2 8 + shl b32 $r3 8 + + // calculate size of strand context data + mov b32 $r15 $r2 + call #strand_ctx_init + add b32 $r3 $r15 + + // save context size, and tell HUB we're done + mov $r1 0x800 + shl b32 $r1 6 + iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size + add b32 $r1 0x800 + clear b32 $r2 + bset $r2 31 + iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 + +// Main program loop, very simple, sleeps until woken up by the interrupt +// handler, pulls a command from the queue and executes its handler +// +main: + bset $flags $p0 + sleep $p0 + mov $r13 #cmd_queue + call #queue_get + bra $p1 #main + + // 0x0000-0x0003 are all context transfers + cmpu b32 $r14 0x04 + bra nc #main_not_ctx_xfer + // fetch $flags and mask off $p1/$p2 + mov $r1 $flags + mov $r2 0x0006 + not b32 $r2 + and $r1 $r2 + // set $p1/$p2 according to transfer type + shl b32 $r14 1 + or $r1 $r14 + mov $flags $r1 + // transfer context data + call #ctx_xfer + bra #main + + main_not_ctx_xfer: + shl b32 $r15 $r14 16 + or $r15 E_BAD_COMMAND + call #error + bra #main + +// interrupt handler +ih: + push $r8 + mov $r8 $flags + push $r8 + push $r9 + push $r10 + push $r11 + push $r13 + push $r14 + push $r15 + + // incoming fifo command? + iord $r10 I[$r0 + 0x200] // INTR + and $r11 $r10 0x00000004 + bra e #ih_no_fifo + // queue incoming fifo command for later processing + mov $r11 0x1900 + mov $r13 #cmd_queue + iord $r14 I[$r11 + 0x100] // FIFO_CMD + iord $r15 I[$r11 + 0x000] // FIFO_DATA + call #queue_put + add b32 $r11 0x400 + mov $r14 1 + iowr I[$r11 + 0x000] $r14 // FIFO_ACK + + // ack, and wake up main() + ih_no_fifo: + iowr I[$r0 + 0x100] $r10 // INTR_ACK + + pop $r15 + pop $r14 + pop $r13 + pop $r11 + pop $r10 + pop $r9 + pop $r8 + mov $flags $r8 + pop $r8 + bclr $flags $p0 + iret + +// Set this GPC's bit in HUB_BAR, used to signal completion of various +// activities to the HUB fuc +// +hub_barrier_done: + mov $r15 1 + ld b32 $r14 D[$r0 + #gpc_id] + shl b32 $r15 $r14 + mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET + sethi $r14 0x400000 + call #nv_wr32 + ret + +// Disables various things, waits a bit, and re-enables them.. +// +// Not sure how exactly this helps, perhaps "ENABLE" is not such a +// good description for the bits we turn off? Anyways, without this, +// funny things happen. 
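+// From the code it looks like GPC_RED_SWITCH is dropped to POWER only,
+// a short 8-iteration busy-loop runs, and then UNK11 | ENABLE | POWER is
+// written back.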
+// +ctx_redswitch: + mov $r14 0x614 + shl b32 $r14 6 + mov $r15 0x020 + iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER + mov $r15 8 + ctx_redswitch_delay: + sub b32 $r15 1 + bra ne #ctx_redswitch_delay + mov $r15 0xa20 + iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER + ret + +// Transfer GPC context data between GPU and storage area +// +// In: $r15 context base address +// $p1 clear on save, set on load +// $p2 set if opposite direction done/will be done, so: +// on save it means: "a load will follow this save" +// on load it means: "a save preceeded this load" +// +ctx_xfer: + // set context base address + mov $r1 0xa04 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r15// MEM_BASE + bra not $p1 #ctx_xfer_not_load + call #ctx_redswitch + ctx_xfer_not_load: + + // strands + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xc + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c + call #strand_wait + mov $r2 0x47fc + sethi $r2 0x20000 + iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 + xbit $r2 $flags $p1 + add b32 $r2 3 + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) + + // mmio context + xbit $r10 $flags $p1 // direction + or $r10 2 // first + mov $r11 0x0000 + sethi $r11 0x500000 + ld b32 $r12 D[$r0 + #gpc_id] + shl b32 $r12 15 + add b32 $r11 $r12 // base = NV_PGRAPH_GPCn + ld b32 $r12 D[$r0 + #gpc_mmio_list_head] + ld b32 $r13 D[$r0 + #gpc_mmio_list_tail] + mov $r14 0 // not multi + call #mmctx_xfer + + // per-TPC mmio context + xbit $r10 $flags $p1 // direction + or $r10 4 // last + mov $r11 0x4000 + sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0 + ld b32 $r12 D[$r0 + #gpc_id] + shl b32 $r12 15 + add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0 + ld b32 $r12 D[$r0 + #tpc_mmio_list_head] + ld b32 $r13 D[$r0 + #tpc_mmio_list_tail] + ld b32 $r15 D[$r0 + #tpc_mask] + mov $r14 0x800 // stride = 0x800 + call #mmctx_xfer + + // wait for strands to finish + call #strand_wait + + // if load, or a save without a load following, do some + // unknown stuff that's done after finishing a block of + // strand commands + bra $p1 #ctx_xfer_post + bra not $p2 #ctx_xfer_done + ctx_xfer_post: + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xd + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d + call #strand_wait + + // mark completion in HUB's barrier + ctx_xfer_done: + call #hub_barrier_done + ret + +.align 256 diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h new file mode 100644 index 0000000..09ee470 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h @@ -0,0 +1,533 @@ +uint32_t nve0_grgpc_data[] = { +/* 0x0000: gpc_id */ + 0x00000000, +/* 0x0004: gpc_mmio_list_head */ + 0x00000000, +/* 0x0008: gpc_mmio_list_tail */ + 0x00000000, +/* 0x000c: tpc_count */ + 0x00000000, +/* 0x0010: tpc_mask */ + 0x00000000, +/* 0x0014: tpc_mmio_list_head */ + 0x00000000, +/* 0x0018: tpc_mmio_list_tail */ + 0x00000000, +/* 0x001c: cmd_queue */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0064: chipsets */ + 0x000000e4, + 0x0110008c, + 0x01580110, + 0x000000e7, + 0x0110008c, + 0x01580110, + 0x000000e6, + 0x0110008c, + 0x01580110, + 0x00000000, +/* 0x008c: nve4_gpc_mmio_head */ + 0x00000380, + 0x04000400, + 0x0800040c, + 0x20000450, + 0x00000600, + 0x00000684, + 0x10000700, + 0x00000800, + 0x08000808, 
+ 0x00000828, + 0x00000830, + 0x000008d8, + 0x000008e0, + 0x140008e8, + 0x0000091c, + 0x08000924, + 0x00000b00, + 0x14000b08, + 0x00000bb8, + 0x00000c08, + 0x1c000c10, + 0x00000c40, + 0x00000c6c, + 0x00000c80, + 0x00000c8c, + 0x08001000, + 0x00001014, + 0x00003024, + 0x040030c0, + 0x000030e4, + 0x14003100, + 0x000031d0, + 0x040031e0, +/* 0x0110: nve4_gpc_mmio_tail */ +/* 0x0110: nve4_tpc_mmio_head */ + 0x00000048, + 0x00000064, + 0x00000088, + 0x14000200, + 0x0400021c, + 0x00000230, + 0x000002c4, + 0x08000400, + 0x08000420, + 0x000004e8, + 0x000004f4, + 0x0c000604, + 0x54000644, + 0x040006ac, + 0x000006c8, + 0x1c000730, + 0x00000758, + 0x00000778, +}; + +uint32_t nve0_grgpc_code[] = { + 0x03060ef5, +/* 0x0004: queue_put */ + 0x9800d898, + 0x86f001d9, + 0x0489b808, + 0xf00c1bf4, + 0x21f502f7, + 0x00f802ec, +/* 0x001c: queue_put_next */ + 0xb60798c4, + 0x8dbb0384, + 0x0880b600, + 0x80008e80, + 0x90b6018f, + 0x0f94f001, + 0xf801d980, +/* 0x0039: queue_get */ + 0x0131f400, + 0x9800d898, + 0x89b801d9, + 0x210bf404, + 0xb60789c4, + 0x9dbb0394, + 0x0890b600, + 0x98009e98, + 0x80b6019f, + 0x0f84f001, + 0xf400d880, +/* 0x0066: queue_get_done */ + 0x00f80132, +/* 0x0068: nv_rd32 */ + 0x0728b7f1, + 0xb906b4b6, + 0xc9f002ec, + 0x00bcd01f, +/* 0x0078: nv_rd32_wait */ + 0xc800bccf, + 0x1bf41fcc, + 0x06a7f0fa, + 0x010321f5, + 0xf840bfcf, +/* 0x008d: nv_wr32 */ + 0x28b7f100, + 0x06b4b607, + 0xb980bfd0, + 0xc9f002ec, + 0x1ec9f01f, +/* 0x00a3: nv_wr32_wait */ + 0xcf00bcd0, + 0xccc800bc, + 0xfa1bf41f, +/* 0x00ae: watchdog_reset */ + 0x87f100f8, + 0x84b60430, + 0x1ff9f006, + 0xf8008fd0, +/* 0x00bd: watchdog_clear */ + 0x3087f100, + 0x0684b604, + 0xf80080d0, +/* 0x00c9: wait_donez */ + 0x3c87f100, + 0x0684b608, + 0x99f094bd, + 0x0089d000, + 0x081887f1, + 0xd00684b6, +/* 0x00e2: wait_done_wait_donez */ + 0x87f1008a, + 0x84b60400, + 0x0088cf06, + 0xf4888aff, + 0x87f1f31b, + 0x84b6085c, + 0xf094bd06, + 0x89d00099, +/* 0x0103: wait_doneo */ + 0xf100f800, + 0xb6083c87, + 0x94bd0684, + 0xd00099f0, + 0x87f10089, + 0x84b60818, + 0x008ad006, +/* 0x011c: wait_done_wait_doneo */ + 0x040087f1, + 0xcf0684b6, + 0x8aff0088, + 0xf30bf488, + 0x085c87f1, + 0xbd0684b6, + 0x0099f094, + 0xf80089d0, +/* 0x013d: mmctx_size */ +/* 0x013f: nv_mmctx_size_loop */ + 0x9894bd00, + 0x85b600e8, + 0x0180b61a, + 0xbb0284b6, + 0xe0b60098, + 0x04efb804, + 0xb9eb1bf4, + 0x00f8029f, +/* 0x015c: mmctx_xfer */ + 0x083c87f1, + 0xbd0684b6, + 0x0199f094, + 0xf10089d0, + 0xb6071087, + 0x94bd0684, + 0xf405bbfd, + 0x8bd0090b, + 0x0099f000, +/* 0x0180: mmctx_base_disabled */ + 0xf405eefd, + 0x8ed00c0b, + 0xc08fd080, +/* 0x018f: mmctx_multi_disabled */ + 0xb70199f0, + 0xc8010080, + 0xb4b600ab, + 0x0cb9f010, + 0xb601aec8, + 0xbefd11e4, + 0x008bd005, +/* 0x01a8: mmctx_exec_loop */ +/* 0x01a8: mmctx_wait_free */ + 0xf0008ecf, + 0x0bf41fe4, + 0x00ce98fa, + 0xd005e9fd, + 0xc0b6c08e, + 0x04cdb804, + 0xc8e81bf4, + 0x1bf402ab, +/* 0x01c9: mmctx_fini_wait */ + 0x008bcf18, + 0xb01fb4f0, + 0x1bf410b4, + 0x02a7f0f7, + 0xf4c921f4, +/* 0x01de: mmctx_stop */ + 0xabc81b0e, + 0x10b4b600, + 0xf00cb9f0, + 0x8bd012b9, +/* 0x01ed: mmctx_stop_wait */ + 0x008bcf00, + 0xf412bbc8, +/* 0x01f6: mmctx_done */ + 0x87f1fa1b, + 0x84b6085c, + 0xf094bd06, + 0x89d00199, +/* 0x0207: strand_wait */ + 0xf900f800, + 0x02a7f0a0, + 0xfcc921f4, +/* 0x0213: strand_pre */ + 0xf100f8a0, + 0xf04afc87, + 0x97f00283, + 0x0089d00c, + 0x020721f5, +/* 0x0226: strand_post */ + 0x87f100f8, + 0x83f04afc, + 0x0d97f002, + 0xf50089d0, + 0xf8020721, +/* 0x0239: strand_set */ + 0xfca7f100, + 0x02a3f04f, + 
0x0500aba2, + 0xd00fc7f0, + 0xc7f000ac, + 0x00bcd00b, + 0x020721f5, + 0xf000aed0, + 0xbcd00ac7, + 0x0721f500, +/* 0x0263: strand_ctx_init */ + 0xf100f802, + 0xb6083c87, + 0x94bd0684, + 0xd00399f0, + 0x21f50089, + 0xe7f00213, + 0x3921f503, + 0xfca7f102, + 0x02a3f046, + 0x0400aba0, + 0xf040a0d0, + 0xbcd001c7, + 0x0721f500, + 0x010c9202, + 0xf000acd0, + 0xbcd002c7, + 0x0721f500, + 0x2621f502, + 0x8087f102, + 0x0684b608, + 0xb70089cf, + 0x95220080, +/* 0x02ba: ctx_init_strand_loop */ + 0x8ed008fe, + 0x408ed000, + 0xb6808acf, + 0xa0b606a5, + 0x00eabb01, + 0xb60480b6, + 0x1bf40192, + 0x08e4b6e8, + 0xf1f2efbc, + 0xb6085c87, + 0x94bd0684, + 0xd00399f0, + 0x00f80089, +/* 0x02ec: error */ + 0xe7f1e0f9, + 0xe3f09814, + 0x8d21f440, + 0x041ce0b7, + 0xf401f7f0, + 0xe0fc8d21, +/* 0x0306: init */ + 0x04bd00f8, + 0xf10004fe, + 0xf0120017, + 0x12d00227, + 0x3e17f100, + 0x0010fe04, + 0x040017f1, + 0xf0c010d0, + 0x12d00427, + 0x1031f400, + 0x060817f1, + 0xcf0614b6, + 0x37f00012, + 0x1f24f001, + 0xb60432bb, + 0x02800132, + 0x04038003, + 0x040010b7, + 0x800012cf, + 0x27f10002, + 0x24b60800, + 0x0022cf06, +/* 0x035f: init_find_chipset */ + 0xb65817f0, + 0x13980c10, + 0x0432b800, + 0xb00b0bf4, + 0x1bf40034, +/* 0x0373: init_context */ + 0xf100f8f1, + 0xb6080027, + 0x22cf0624, + 0xf134bd40, + 0xb6070047, + 0x25950644, + 0x0045d008, + 0xbd4045d0, + 0x58f4bde4, + 0x1f58021e, + 0x020e4003, + 0xf5040f40, + 0xbb013d21, + 0x3fbb002f, + 0x041e5800, + 0x40051f58, + 0x0f400a0e, + 0x3d21f50c, + 0x030e9801, + 0xbb00effd, + 0x3ebb002e, + 0x0040b700, + 0x0235b613, + 0xb60043d0, + 0x35b60825, + 0x0120b606, + 0xb60130b6, + 0x34b60824, + 0x022fb908, + 0x026321f5, + 0xf1003fbb, + 0xb6080017, + 0x13d00614, + 0x0010b740, + 0xf024bd08, + 0x12d01f29, +/* 0x0401: main */ + 0x0031f400, + 0xf00028f4, + 0x21f41cd7, + 0xf401f439, + 0xf404e4b0, + 0x81fe1e18, + 0x0627f001, + 0x12fd20bd, + 0x01e4b604, + 0xfe051efd, + 0x21f50018, + 0x0ef404c3, +/* 0x0431: main_not_ctx_xfer */ + 0x10ef94d3, + 0xf501f5f0, + 0xf402ec21, +/* 0x043e: ih */ + 0x80f9c60e, + 0xf90188fe, + 0xf990f980, + 0xf9b0f9a0, + 0xf9e0f9d0, + 0x800acff0, + 0xf404abc4, + 0xb7f11d0b, + 0xd7f01900, + 0x40becf1c, + 0xf400bfcf, + 0xb0b70421, + 0xe7f00400, + 0x00bed001, +/* 0x0474: ih_no_fifo */ + 0xfc400ad0, + 0xfce0fcf0, + 0xfcb0fcd0, + 0xfc90fca0, + 0x0088fe80, + 0x32f480fc, +/* 0x048f: hub_barrier_done */ + 0xf001f800, + 0x0e9801f7, + 0x04febb00, + 0x9418e7f1, + 0xf440e3f0, + 0x00f88d21, +/* 0x04a4: ctx_redswitch */ + 0x0614e7f1, + 0xf006e4b6, + 0xefd020f7, + 0x08f7f000, +/* 0x04b4: ctx_redswitch_delay */ + 0xf401f2b6, + 0xf7f1fd1b, + 0xefd00a20, +/* 0x04c3: ctx_xfer */ + 0xf100f800, + 0xb60a0417, + 0x1fd00614, + 0x0711f400, + 0x04a421f5, +/* 0x04d4: ctx_xfer_not_load */ + 0x4afc17f1, + 0xf00213f0, + 0x12d00c27, + 0x0721f500, + 0xfc27f102, + 0x0223f047, + 0xf00020d0, + 0x20b6012c, + 0x0012d003, + 0xf001acf0, + 0xb7f002a5, + 0x50b3f000, + 0xb6000c98, + 0xbcbb0fc4, + 0x010c9800, + 0xf0020d98, + 0x21f500e7, + 0xacf0015c, + 0x04a5f001, + 0x4000b7f1, + 0x9850b3f0, + 0xc4b6000c, + 0x00bcbb0f, + 0x98050c98, + 0x0f98060d, + 0x00e7f104, + 0x5c21f508, + 0x0721f501, + 0x0601f402, +/* 0x054b: ctx_xfer_post */ + 0xf11412f4, + 0xf04afc17, + 0x27f00213, + 0x0012d00d, + 0x020721f5, +/* 0x055c: ctx_xfer_done */ + 0x048f21f5, + 0x000000f8, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc new file mode 100644 index 0000000..7fbdebb --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc @@ -0,0 +1,869 @@ +/* fuc microcode for nvc0 PGRAPH/HUB + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +/* To build: + * m4 hubnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o hubnvc0.fuc.h + */ + +.section #nvc0_grhub_data +include(`nvc0.fuc') +gpc_count: .b32 0 +rop_count: .b32 0 +cmd_queue: queue_init +hub_mmio_list_head: .b32 0 +hub_mmio_list_tail: .b32 0 + +ctx_current: .b32 0 + +chipsets: +.b8 0xc0 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc0_hub_mmio_tail +.b8 0xc1 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc1_hub_mmio_tail +.b8 0xc3 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc0_hub_mmio_tail +.b8 0xc4 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc0_hub_mmio_tail +.b8 0xc8 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc0_hub_mmio_tail +.b8 0xce 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc0_hub_mmio_tail +.b8 0xcf 0 0 0 +.b16 #nvc0_hub_mmio_head +.b16 #nvc0_hub_mmio_tail +.b8 0xd9 0 0 0 +.b16 #nvd9_hub_mmio_head +.b16 #nvd9_hub_mmio_tail +.b8 0xd7 0 0 0 +.b16 #nvd9_hub_mmio_head +.b16 #nvd9_hub_mmio_tail +.b8 0 0 0 0 + +nvc0_hub_mmio_head: +mmctx_data(0x17e91c, 2) +mmctx_data(0x400204, 2) +mmctx_data(0x404004, 11) +mmctx_data(0x404044, 1) +mmctx_data(0x404094, 14) +mmctx_data(0x4040d0, 7) +mmctx_data(0x4040f8, 1) +mmctx_data(0x404130, 3) +mmctx_data(0x404150, 3) +mmctx_data(0x404164, 2) +mmctx_data(0x404174, 3) +mmctx_data(0x404200, 8) +mmctx_data(0x404404, 14) +mmctx_data(0x404460, 4) +mmctx_data(0x404480, 1) +mmctx_data(0x404498, 1) +mmctx_data(0x404604, 4) +mmctx_data(0x404618, 32) +mmctx_data(0x404698, 21) +mmctx_data(0x4046f0, 2) +mmctx_data(0x404700, 22) +mmctx_data(0x405800, 1) +mmctx_data(0x405830, 3) +mmctx_data(0x405854, 1) +mmctx_data(0x405870, 4) +mmctx_data(0x405a00, 2) +mmctx_data(0x405a18, 1) +mmctx_data(0x406020, 1) +mmctx_data(0x406028, 4) +mmctx_data(0x4064a8, 2) +mmctx_data(0x4064b4, 2) +mmctx_data(0x407804, 1) +mmctx_data(0x40780c, 6) +mmctx_data(0x4078bc, 1) 
+mmctx_data(0x408000, 7) +mmctx_data(0x408064, 1) +mmctx_data(0x408800, 3) +mmctx_data(0x408900, 4) +mmctx_data(0x408980, 1) +nvc0_hub_mmio_tail: +mmctx_data(0x4064c0, 2) +nvc1_hub_mmio_tail: + +nvd9_hub_mmio_head: +mmctx_data(0x17e91c, 2) +mmctx_data(0x400204, 2) +mmctx_data(0x404004, 10) +mmctx_data(0x404044, 1) +mmctx_data(0x404094, 14) +mmctx_data(0x4040d0, 7) +mmctx_data(0x4040f8, 1) +mmctx_data(0x404130, 3) +mmctx_data(0x404150, 3) +mmctx_data(0x404164, 2) +mmctx_data(0x404178, 2) +mmctx_data(0x404200, 8) +mmctx_data(0x404404, 14) +mmctx_data(0x404460, 4) +mmctx_data(0x404480, 1) +mmctx_data(0x404498, 1) +mmctx_data(0x404604, 4) +mmctx_data(0x404618, 32) +mmctx_data(0x404698, 21) +mmctx_data(0x4046f0, 2) +mmctx_data(0x404700, 22) +mmctx_data(0x405800, 1) +mmctx_data(0x405830, 3) +mmctx_data(0x405854, 1) +mmctx_data(0x405870, 4) +mmctx_data(0x405a00, 2) +mmctx_data(0x405a18, 1) +mmctx_data(0x406020, 1) +mmctx_data(0x406028, 4) +mmctx_data(0x4064a8, 2) +mmctx_data(0x4064b4, 5) +mmctx_data(0x407804, 1) +mmctx_data(0x40780c, 6) +mmctx_data(0x4078bc, 1) +mmctx_data(0x408000, 7) +mmctx_data(0x408064, 1) +mmctx_data(0x408800, 3) +mmctx_data(0x408900, 4) +mmctx_data(0x408980, 1) +nvd9_hub_mmio_tail: + +.align 256 +chan_data: +chan_mmio_count: .b32 0 +chan_mmio_address: .b32 0 + +.align 256 +xfer_data: .b32 0 + +.section #nvc0_grhub_code +bra #init +define(`include_code') +include(`nvc0.fuc') + +// reports an exception to the host +// +// In: $r15 error code (see nvc0.fuc) +// +error: + push $r14 + mov $r14 0x814 + shl b32 $r14 6 + iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code + mov $r14 0xc1c + shl b32 $r14 6 + mov $r15 1 + iowr I[$r14 + 0x000] $r15 // INTR_UP_SET + pop $r14 + ret + +// HUB fuc initialisation, executed by triggering ucode start, will +// fall through to main loop after completion. +// +// Input: +// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) +// +// Output: +// CC_SCRATCH[0]: +// 31:31: set to signal completion +// CC_SCRATCH[1]: +// 31:0: total PGRAPH context size +// +init: + clear b32 $r0 + mov $sp $r0 + mov $xdbase $r0 + + // enable fifo access + mov $r1 0x1200 + mov $r2 2 + iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE + + // setup i0 handler, and route all interrupts to it + mov $r1 #ih + mov $iv0 $r1 + mov $r1 0x400 + iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH + + // route HUB_CHANNEL_SWITCH to fuc interrupt 8 + mov $r3 0x404 + shl b32 $r3 6 + mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8 + iowr I[$r3 + 0x000] $r2 + + // not sure what these are, route them because NVIDIA does, and + // the IRQ handler will signal the host if we ever get one.. we + // may find out if/why we need to handle these if so.. 
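+	// (each value written below seems to follow the same pattern as the
+	// HUB_CHANNEL_SWITCH route above: the low byte selects the source,
+	// and the register offset picks the fuc interrupt line it raises)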
+ // + mov $r2 0x2004 + iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9 + mov $r2 0x200b + iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10 + mov $r2 0x200c + iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15 + + // enable all INTR_UP interrupts + mov $r2 0xc24 + shl b32 $r2 6 + not b32 $r3 $r0 + iowr I[$r2] $r3 + + // enable fifo, ctxsw, 9, 10, 15 interrupts + mov $r2 -0x78fc // 0x8704 + sethi $r2 0 + iowr I[$r1 + 0x000] $r2 // INTR_EN_SET + + // fifo level triggered, rest edge + sub b32 $r1 0x100 + mov $r2 4 + iowr I[$r1] $r2 + + // enable interrupts + bset $flags ie0 + + // fetch enabled GPC/ROP counts + mov $r14 -0x69fc // 0x409604 + sethi $r14 0x400000 + call #nv_rd32 + extr $r1 $r15 16:20 + st b32 D[$r0 + #rop_count] $r1 + and $r15 0x1f + st b32 D[$r0 + #gpc_count] $r15 + + // set BAR_REQMASK to GPC mask + mov $r1 1 + shl b32 $r1 $r15 + sub b32 $r1 1 + mov $r2 0x40c + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r1 + iowr I[$r2 + 0x100] $r1 + + // find context data for this chipset + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] + mov $r15 #chipsets - 8 + init_find_chipset: + add b32 $r15 8 + ld b32 $r3 D[$r15 + 0x00] + cmpu b32 $r3 $r2 + bra e #init_context + cmpu b32 $r3 0 + bra ne #init_find_chipset + // unknown chipset + ret + + // context size calculation, reserve first 256 bytes for use by fuc + init_context: + mov $r1 256 + + // calculate size of mmio context data + ld b16 $r14 D[$r15 + 4] + ld b16 $r15 D[$r15 + 6] + sethi $r14 0 + st b32 D[$r0 + #hub_mmio_list_head] $r14 + st b32 D[$r0 + #hub_mmio_list_tail] $r15 + call #mmctx_size + + // set mmctx base addresses now so we don't have to do it later, + // they don't (currently) ever change + mov $r3 0x700 + shl b32 $r3 6 + shr b32 $r4 $r1 8 + iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE + iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE + add b32 $r3 0x1300 + add b32 $r1 $r15 + shr b32 $r15 2 + iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!? + + // strands, base offset needs to be aligned to 256 bytes + shr b32 $r1 8 + add b32 $r1 1 + shl b32 $r1 8 + mov b32 $r15 $r1 + call #strand_ctx_init + add b32 $r1 $r15 + + // initialise each GPC in sequence by passing in the offset of its + // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which + // has previously been uploaded by the host) running. + // + // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31 + // when it has completed, and return the size of its context data + // in GPCn_CC_SCRATCH[1] + // + ld b32 $r3 D[$r0 + #gpc_count] + mov $r4 0x2000 + sethi $r4 0x500000 + init_gpc: + // setup, and start GPC ucode running + add b32 $r14 $r4 0x804 + mov b32 $r15 $r1 + call #nv_wr32 // CC_SCRATCH[1] = ctx offset + add b32 $r14 $r4 0x800 + mov b32 $r15 $r2 + call #nv_wr32 // CC_SCRATCH[0] = chipset + add b32 $r14 $r4 0x10c + clear b32 $r15 + call #nv_wr32 + add b32 $r14 $r4 0x104 + call #nv_wr32 // ENTRY + add b32 $r14 $r4 0x100 + mov $r15 2 // CTRL_START_TRIGGER + call #nv_wr32 // CTRL + + // wait for it to complete, and adjust context size + add b32 $r14 $r4 0x800 + init_gpc_wait: + call #nv_rd32 + xbit $r15 $r15 31 + bra e #init_gpc_wait + add b32 $r14 $r4 0x804 + call #nv_rd32 + add b32 $r1 $r15 + + // next! 
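+	// ($r4 was set to 0x502000 above and gets bumped by 0x8000 below,
+	// so each GPC's falcon appears to live 0x8000 apart in that window)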
+ add b32 $r4 0x8000 + sub b32 $r3 1 + bra ne #init_gpc + + // save context size, and tell host we're ready + mov $r2 0x800 + shl b32 $r2 6 + iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size + add b32 $r2 0x800 + clear b32 $r1 + bset $r1 31 + iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000 + +// Main program loop, very simple, sleeps until woken up by the interrupt +// handler, pulls a command from the queue and executes its handler +// +main: + // sleep until we have something to do + bset $flags $p0 + sleep $p0 + mov $r13 #cmd_queue + call #queue_get + bra $p1 #main + + // context switch, requested by GPU? + cmpu b32 $r14 0x4001 + bra ne #main_not_ctx_switch + trace_set(T_AUTO) + mov $r1 0xb00 + shl b32 $r1 6 + iord $r2 I[$r1 + 0x100] // CHAN_NEXT + iord $r1 I[$r1 + 0x000] // CHAN_CUR + + xbit $r3 $r1 31 + bra e #chsw_no_prev + xbit $r3 $r2 31 + bra e #chsw_prev_no_next + push $r2 + mov b32 $r2 $r1 + trace_set(T_SAVE) + bclr $flags $p1 + bset $flags $p2 + call #ctx_xfer + trace_clr(T_SAVE); + pop $r2 + trace_set(T_LOAD); + bset $flags $p1 + call #ctx_xfer + trace_clr(T_LOAD); + bra #chsw_done + chsw_prev_no_next: + push $r2 + mov b32 $r2 $r1 + bclr $flags $p1 + bclr $flags $p2 + call #ctx_xfer + pop $r2 + mov $r1 0xb00 + shl b32 $r1 6 + iowr I[$r1] $r2 + bra #chsw_done + chsw_no_prev: + xbit $r3 $r2 31 + bra e #chsw_done + bset $flags $p1 + bclr $flags $p2 + call #ctx_xfer + + // ack the context switch request + chsw_done: + mov $r1 0xb0c + shl b32 $r1 6 + mov $r2 1 + iowr I[$r1 + 0x000] $r2 // 0x409b0c + trace_clr(T_AUTO) + bra #main + + // request to set current channel? (*not* a context switch) + main_not_ctx_switch: + cmpu b32 $r14 0x0001 + bra ne #main_not_ctx_chan + mov b32 $r2 $r15 + call #ctx_chan + bra #main_done + + // request to store current channel context? + main_not_ctx_chan: + cmpu b32 $r14 0x0002 + bra ne #main_not_ctx_save + trace_set(T_SAVE) + bclr $flags $p1 + bclr $flags $p2 + call #ctx_xfer + trace_clr(T_SAVE) + bra #main_done + + main_not_ctx_save: + shl b32 $r15 $r14 16 + or $r15 E_BAD_COMMAND + call #error + bra #main + + main_done: + mov $r1 0x820 + shl b32 $r1 6 + clear b32 $r2 + bset $r2 31 + iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 + bra #main + +// interrupt handler +ih: + push $r8 + mov $r8 $flags + push $r8 + push $r9 + push $r10 + push $r11 + push $r13 + push $r14 + push $r15 + + // incoming fifo command? + iord $r10 I[$r0 + 0x200] // INTR + and $r11 $r10 0x00000004 + bra e #ih_no_fifo + // queue incoming fifo command for later processing + mov $r11 0x1900 + mov $r13 #cmd_queue + iord $r14 I[$r11 + 0x100] // FIFO_CMD + iord $r15 I[$r11 + 0x000] // FIFO_DATA + call #queue_put + add b32 $r11 0x400 + mov $r14 1 + iowr I[$r11 + 0x000] $r14 // FIFO_ACK + + // context switch request? 
+ ih_no_fifo: + and $r11 $r10 0x00000100 + bra e #ih_no_ctxsw + // enqueue a context switch for later processing + mov $r13 #cmd_queue + mov $r14 0x4001 + call #queue_put + + // anything we didn't handle, bring it to the host's attention + ih_no_ctxsw: + mov $r11 0x104 + not b32 $r11 + and $r11 $r10 $r11 + bra e #ih_no_other + mov $r10 0xc1c + shl b32 $r10 6 + iowr I[$r10] $r11 // INTR_UP_SET + + // ack, and wake up main() + ih_no_other: + iowr I[$r0 + 0x100] $r10 // INTR_ACK + + pop $r15 + pop $r14 + pop $r13 + pop $r11 + pop $r10 + pop $r9 + pop $r8 + mov $flags $r8 + pop $r8 + bclr $flags $p0 + iret + +// Not real sure, but, MEM_CMD 7 will hang forever if this isn't done +ctx_4160s: + mov $r14 0x4160 + sethi $r14 0x400000 + mov $r15 1 + call #nv_wr32 + ctx_4160s_wait: + call #nv_rd32 + xbit $r15 $r15 4 + bra e #ctx_4160s_wait + ret + +// Without clearing again at end of xfer, some things cause PGRAPH +// to hang with STATUS=0x00000007 until it's cleared.. fbcon can +// still function with it set however... +ctx_4160c: + mov $r14 0x4160 + sethi $r14 0x400000 + clear b32 $r15 + call #nv_wr32 + ret + +// Again, not real sure +// +// In: $r15 value to set 0x404170 to +// +ctx_4170s: + mov $r14 0x4170 + sethi $r14 0x400000 + or $r15 0x10 + call #nv_wr32 + ret + +// Waits for a ctx_4170s() call to complete +// +ctx_4170w: + mov $r14 0x4170 + sethi $r14 0x400000 + call #nv_rd32 + and $r15 0x10 + bra ne #ctx_4170w + ret + +// Disables various things, waits a bit, and re-enables them.. +// +// Not sure how exactly this helps, perhaps "ENABLE" is not such a +// good description for the bits we turn off? Anyways, without this, +// funny things happen. +// +ctx_redswitch: + mov $r14 0x614 + shl b32 $r14 6 + mov $r15 0x270 + iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL + mov $r15 8 + ctx_redswitch_delay: + sub b32 $r15 1 + bra ne #ctx_redswitch_delay + mov $r15 0x770 + iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL + ret + +// Not a clue what this is for, except that unless the value is 0x10, the +// strand context is saved (and presumably restored) incorrectly.. +// +// In: $r15 value to set to (0x00/0x10 are used) +// +ctx_86c: + mov $r14 0x86c + shl b32 $r14 6 + iowr I[$r14] $r15 // HUB(0x86c) = val + mov $r14 -0x75ec + sethi $r14 0x400000 + call #nv_wr32 // ROP(0xa14) = val + mov $r14 -0x5794 + sethi $r14 0x410000 + call #nv_wr32 // GPC(0x86c) = val + ret + +// ctx_load - load's a channel's ctxctl data, and selects its vm +// +// In: $r2 channel address +// +ctx_load: + trace_set(T_CHAN) + + // switch to channel, somewhat magic in parts.. 
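+	// (roughly: wait for DONE_UNK12, clear 0x409a24, latch the channel
+	// into MEM_CHAN/CHAN_NEXT, kick MEM_CMD = 7 and poll until it
+	// drains, then promote the channel to CHAN_CUR)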
+ mov $r10 12 // DONE_UNK12 + call #wait_donez + mov $r1 0xa24 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r0 // 0x409a24 + mov $r3 0xb00 + shl b32 $r3 6 + iowr I[$r3 + 0x100] $r2 // CHAN_NEXT + mov $r1 0xa0c + shl b32 $r1 6 + mov $r4 7 + iowr I[$r1 + 0x000] $r2 // MEM_CHAN + iowr I[$r1 + 0x100] $r4 // MEM_CMD + ctx_chan_wait_0: + iord $r4 I[$r1 + 0x100] + and $r4 0x1f + bra ne #ctx_chan_wait_0 + iowr I[$r3 + 0x000] $r2 // CHAN_CUR + + // load channel header, fetch PGRAPH context pointer + mov $xtargets $r0 + bclr $r2 31 + shl b32 $r2 4 + add b32 $r2 2 + + trace_set(T_LCHAN) + mov $r1 0xa04 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r2 // MEM_BASE + mov $r1 0xa20 + shl b32 $r1 6 + mov $r2 0x0002 + sethi $r2 0x80000000 + iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram + mov $r1 0x10 // chan + 0x0210 + mov $r2 #xfer_data + sethi $r2 0x00020000 // 16 bytes + xdld $r1 $r2 + xdwait + trace_clr(T_LCHAN) + + // update current context + ld b32 $r1 D[$r0 + #xfer_data + 4] + shl b32 $r1 24 + ld b32 $r2 D[$r0 + #xfer_data + 0] + shr b32 $r2 8 + or $r1 $r2 + st b32 D[$r0 + #ctx_current] $r1 + + // set transfer base to start of context, and fetch context header + trace_set(T_LCTXH) + mov $r2 0xa04 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r1 // MEM_BASE + mov $r2 1 + mov $r1 0xa20 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm + mov $r1 #chan_data + sethi $r1 0x00060000 // 256 bytes + xdld $r0 $r1 + xdwait + trace_clr(T_LCTXH) + + trace_clr(T_CHAN) + ret + +// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as +// the active channel for ctxctl, but not actually transfer +// any context data. intended for use only during initial +// context construction. +// +// In: $r2 channel address +// +ctx_chan: + call #ctx_4160s + call #ctx_load + mov $r10 12 // DONE_UNK12 + call #wait_donez + mov $r1 0xa10 + shl b32 $r1 6 + mov $r2 5 + iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???) + ctx_chan_wait: + iord $r2 I[$r1 + 0x000] + or $r2 $r2 + bra ne #ctx_chan_wait + call #ctx_4160c + ret + +// Execute per-context state overrides list +// +// Only executed on the first load of a channel. Might want to look into +// removing this and having the host directly modify the channel's context +// to change this state... The nouveau DRM already builds this list as +// it's definitely needed for NVIDIA's, so we may as well use it for now +// +// Input: $r1 mmio list length +// +ctx_mmio_exec: + // set transfer base to be the mmio list + ld b32 $r3 D[$r0 + #chan_mmio_address] + mov $r2 0xa04 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r3 // MEM_BASE + + clear b32 $r3 + ctx_mmio_loop: + // fetch next 256 bytes of mmio list if necessary + and $r4 $r3 0xff + bra ne #ctx_mmio_pull + mov $r5 #xfer_data + sethi $r5 0x00060000 // 256 bytes + xdld $r3 $r5 + xdwait + + // execute a single list entry + ctx_mmio_pull: + ld b32 $r14 D[$r4 + #xfer_data + 0x00] + ld b32 $r15 D[$r4 + #xfer_data + 0x04] + call #nv_wr32 + + // next! 
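+	// each list entry appears to be 8 bytes (register at +0x00, value
+	// at +0x04), hence the stride of 8 below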
+ add b32 $r3 8 + sub b32 $r1 1 + bra ne #ctx_mmio_loop + + // set transfer base back to the current context + ctx_mmio_done: + ld b32 $r3 D[$r0 + #ctx_current] + iowr I[$r2 + 0x000] $r3 // MEM_BASE + + // disable the mmio list now, we don't need/want to execute it again + st b32 D[$r0 + #chan_mmio_count] $r0 + mov $r1 #chan_data + sethi $r1 0x00060000 // 256 bytes + xdst $r0 $r1 + xdwait + ret + +// Transfer HUB context data between GPU and storage area +// +// In: $r2 channel address +// $p1 clear on save, set on load +// $p2 set if opposite direction done/will be done, so: +// on save it means: "a load will follow this save" +// on load it means: "a save preceeded this load" +// +ctx_xfer: + // according to mwk, some kind of wait for idle + mov $r15 0xc00 + shl b32 $r15 6 + mov $r14 4 + iowr I[$r15 + 0x200] $r14 + ctx_xfer_idle: + iord $r14 I[$r15 + 0x000] + and $r14 0x2000 + bra ne #ctx_xfer_idle + + bra not $p1 #ctx_xfer_pre + bra $p2 #ctx_xfer_pre_load + ctx_xfer_pre: + mov $r15 0x10 + call #ctx_86c + call #ctx_4160s + bra not $p1 #ctx_xfer_exec + + ctx_xfer_pre_load: + mov $r15 2 + call #ctx_4170s + call #ctx_4170w + call #ctx_redswitch + clear b32 $r15 + call #ctx_4170s + call #ctx_load + + // fetch context pointer, and initiate xfer on all GPCs + ctx_xfer_exec: + ld b32 $r1 D[$r0 + #ctx_current] + mov $r2 0x414 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset + mov $r14 -0x5b00 + sethi $r14 0x410000 + mov b32 $r15 $r1 + call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer + add b32 $r14 4 + xbit $r15 $flags $p1 + xbit $r2 $flags $p2 + shl b32 $r2 1 + or $r15 $r2 + call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) + + // strands + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xc + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c + call #strand_wait + mov $r2 0x47fc + sethi $r2 0x20000 + iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 + xbit $r2 $flags $p1 + add b32 $r2 3 + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) + + // mmio context + xbit $r10 $flags $p1 // direction + or $r10 6 // first, last + mov $r11 0 // base = 0 + ld b32 $r12 D[$r0 + #hub_mmio_list_head] + ld b32 $r13 D[$r0 + #hub_mmio_list_tail] + mov $r14 0 // not multi + call #mmctx_xfer + + // wait for GPCs to all complete + mov $r10 8 // DONE_BAR + call #wait_doneo + + // wait for strand xfer to complete + call #strand_wait + + // post-op + bra $p1 #ctx_xfer_post + mov $r10 12 // DONE_UNK12 + call #wait_donez + mov $r1 0xa10 + shl b32 $r1 6 + mov $r2 5 + iowr I[$r1] $r2 // MEM_CMD + ctx_xfer_post_save_wait: + iord $r2 I[$r1] + or $r2 $r2 + bra ne #ctx_xfer_post_save_wait + + bra $p2 #ctx_xfer_done + ctx_xfer_post: + mov $r15 2 + call #ctx_4170s + clear b32 $r15 + call #ctx_86c + call #strand_post + call #ctx_4170w + clear b32 $r15 + call #ctx_4170s + + bra not $p1 #ctx_xfer_no_post_mmio + ld b32 $r1 D[$r0 + #chan_mmio_count] + or $r1 $r1 + bra e #ctx_xfer_no_post_mmio + call #ctx_mmio_exec + + ctx_xfer_no_post_mmio: + call #ctx_4160c + + ctx_xfer_done: + ret + +.align 256 diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h new file mode 100644 index 0000000..bb03d2a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h @@ -0,0 +1,928 @@ +uint32_t nvc0_grhub_data[] = { +/* 0x0000: gpc_count */ + 0x00000000, +/* 0x0004: rop_count */ + 0x00000000, +/* 0x0008: cmd_queue */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0050: hub_mmio_list_head */ + 0x00000000, +/* 0x0054: hub_mmio_list_tail */ + 0x00000000, +/* 0x0058: ctx_current */ + 0x00000000, +/* 0x005c: chipsets */ + 0x000000c0, + 0x013c00a0, + 0x000000c1, + 0x014000a0, + 0x000000c3, + 0x013c00a0, + 0x000000c4, + 0x013c00a0, + 0x000000c8, + 0x013c00a0, + 0x000000ce, + 0x013c00a0, + 0x000000cf, + 0x013c00a0, + 0x000000d9, + 0x01dc0140, + 0x00000000, +/* 0x00a0: nvc0_hub_mmio_head */ + 0x0417e91c, + 0x04400204, + 0x28404004, + 0x00404044, + 0x34404094, + 0x184040d0, + 0x004040f8, + 0x08404130, + 0x08404150, + 0x04404164, + 0x08404174, + 0x1c404200, + 0x34404404, + 0x0c404460, + 0x00404480, + 0x00404498, + 0x0c404604, + 0x7c404618, + 0x50404698, + 0x044046f0, + 0x54404700, + 0x00405800, + 0x08405830, + 0x00405854, + 0x0c405870, + 0x04405a00, + 0x00405a18, + 0x00406020, + 0x0c406028, + 0x044064a8, + 0x044064b4, + 0x00407804, + 0x1440780c, + 0x004078bc, + 0x18408000, + 0x00408064, + 0x08408800, + 0x0c408900, + 0x00408980, +/* 0x013c: nvc0_hub_mmio_tail */ + 0x044064c0, +/* 0x0140: nvc1_hub_mmio_tail */ +/* 0x0140: nvd9_hub_mmio_head */ + 0x0417e91c, + 0x04400204, + 0x24404004, + 0x00404044, + 0x34404094, + 0x184040d0, + 0x004040f8, + 0x08404130, + 0x08404150, + 0x04404164, + 0x04404178, + 0x1c404200, + 0x34404404, + 0x0c404460, + 0x00404480, + 0x00404498, + 0x0c404604, + 0x7c404618, + 0x50404698, + 0x044046f0, + 0x54404700, + 0x00405800, + 0x08405830, + 0x00405854, + 0x0c405870, + 0x04405a00, + 0x00405a18, + 0x00406020, + 0x0c406028, + 0x044064a8, + 0x104064b4, + 0x00407804, + 0x1440780c, + 0x004078bc, + 0x18408000, + 0x00408064, + 0x08408800, + 0x0c408900, + 0x00408980, +/* 0x01dc: nvd9_hub_mmio_tail */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0200: chan_data */ +/* 0x0200: chan_mmio_count */ + 0x00000000, +/* 0x0204: chan_mmio_address */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0300: xfer_data */ + 0x00000000, +}; + +uint32_t nvc0_grhub_code[] = { + 0x03090ef5, +/* 0x0004: queue_put */ + 0x9800d898, + 0x86f001d9, + 0x0489b808, + 0xf00c1bf4, + 0x21f502f7, + 0x00f802ec, +/* 0x001c: queue_put_next */ + 0xb60798c4, + 0x8dbb0384, + 0x0880b600, + 0x80008e80, + 0x90b6018f, + 0x0f94f001, + 0xf801d980, +/* 0x0039: queue_get */ + 0x0131f400, + 0x9800d898, + 0x89b801d9, + 0x210bf404, + 0xb60789c4, + 0x9dbb0394, + 0x0890b600, + 0x98009e98, + 0x80b6019f, + 0x0f84f001, + 0xf400d880, +/* 0x0066: queue_get_done */ + 0x00f80132, +/* 0x0068: nv_rd32 */ + 0x0728b7f1, + 0xb906b4b6, + 0xc9f002ec, + 0x00bcd01f, 
+/* 0x0078: nv_rd32_wait */ + 0xc800bccf, + 0x1bf41fcc, + 0x06a7f0fa, + 0x010321f5, + 0xf840bfcf, +/* 0x008d: nv_wr32 */ + 0x28b7f100, + 0x06b4b607, + 0xb980bfd0, + 0xc9f002ec, + 0x1ec9f01f, +/* 0x00a3: nv_wr32_wait */ + 0xcf00bcd0, + 0xccc800bc, + 0xfa1bf41f, +/* 0x00ae: watchdog_reset */ + 0x87f100f8, + 0x84b60430, + 0x1ff9f006, + 0xf8008fd0, +/* 0x00bd: watchdog_clear */ + 0x3087f100, + 0x0684b604, + 0xf80080d0, +/* 0x00c9: wait_donez */ + 0x3c87f100, + 0x0684b608, + 0x99f094bd, + 0x0089d000, + 0x081887f1, + 0xd00684b6, +/* 0x00e2: wait_done_wait_donez */ + 0x87f1008a, + 0x84b60400, + 0x0088cf06, + 0xf4888aff, + 0x87f1f31b, + 0x84b6085c, + 0xf094bd06, + 0x89d00099, +/* 0x0103: wait_doneo */ + 0xf100f800, + 0xb6083c87, + 0x94bd0684, + 0xd00099f0, + 0x87f10089, + 0x84b60818, + 0x008ad006, +/* 0x011c: wait_done_wait_doneo */ + 0x040087f1, + 0xcf0684b6, + 0x8aff0088, + 0xf30bf488, + 0x085c87f1, + 0xbd0684b6, + 0x0099f094, + 0xf80089d0, +/* 0x013d: mmctx_size */ +/* 0x013f: nv_mmctx_size_loop */ + 0x9894bd00, + 0x85b600e8, + 0x0180b61a, + 0xbb0284b6, + 0xe0b60098, + 0x04efb804, + 0xb9eb1bf4, + 0x00f8029f, +/* 0x015c: mmctx_xfer */ + 0x083c87f1, + 0xbd0684b6, + 0x0199f094, + 0xf10089d0, + 0xb6071087, + 0x94bd0684, + 0xf405bbfd, + 0x8bd0090b, + 0x0099f000, +/* 0x0180: mmctx_base_disabled */ + 0xf405eefd, + 0x8ed00c0b, + 0xc08fd080, +/* 0x018f: mmctx_multi_disabled */ + 0xb70199f0, + 0xc8010080, + 0xb4b600ab, + 0x0cb9f010, + 0xb601aec8, + 0xbefd11e4, + 0x008bd005, +/* 0x01a8: mmctx_exec_loop */ +/* 0x01a8: mmctx_wait_free */ + 0xf0008ecf, + 0x0bf41fe4, + 0x00ce98fa, + 0xd005e9fd, + 0xc0b6c08e, + 0x04cdb804, + 0xc8e81bf4, + 0x1bf402ab, +/* 0x01c9: mmctx_fini_wait */ + 0x008bcf18, + 0xb01fb4f0, + 0x1bf410b4, + 0x02a7f0f7, + 0xf4c921f4, +/* 0x01de: mmctx_stop */ + 0xabc81b0e, + 0x10b4b600, + 0xf00cb9f0, + 0x8bd012b9, +/* 0x01ed: mmctx_stop_wait */ + 0x008bcf00, + 0xf412bbc8, +/* 0x01f6: mmctx_done */ + 0x87f1fa1b, + 0x84b6085c, + 0xf094bd06, + 0x89d00199, +/* 0x0207: strand_wait */ + 0xf900f800, + 0x02a7f0a0, + 0xfcc921f4, +/* 0x0213: strand_pre */ + 0xf100f8a0, + 0xf04afc87, + 0x97f00283, + 0x0089d00c, + 0x020721f5, +/* 0x0226: strand_post */ + 0x87f100f8, + 0x83f04afc, + 0x0d97f002, + 0xf50089d0, + 0xf8020721, +/* 0x0239: strand_set */ + 0xfca7f100, + 0x02a3f04f, + 0x0500aba2, + 0xd00fc7f0, + 0xc7f000ac, + 0x00bcd00b, + 0x020721f5, + 0xf000aed0, + 0xbcd00ac7, + 0x0721f500, +/* 0x0263: strand_ctx_init */ + 0xf100f802, + 0xb6083c87, + 0x94bd0684, + 0xd00399f0, + 0x21f50089, + 0xe7f00213, + 0x3921f503, + 0xfca7f102, + 0x02a3f046, + 0x0400aba0, + 0xf040a0d0, + 0xbcd001c7, + 0x0721f500, + 0x010c9202, + 0xf000acd0, + 0xbcd002c7, + 0x0721f500, + 0x2621f502, + 0x8087f102, + 0x0684b608, + 0xb70089cf, + 0x95220080, +/* 0x02ba: ctx_init_strand_loop */ + 0x8ed008fe, + 0x408ed000, + 0xb6808acf, + 0xa0b606a5, + 0x00eabb01, + 0xb60480b6, + 0x1bf40192, + 0x08e4b6e8, + 0xf1f2efbc, + 0xb6085c87, + 0x94bd0684, + 0xd00399f0, + 0x00f80089, +/* 0x02ec: error */ + 0xe7f1e0f9, + 0xe4b60814, + 0x00efd006, + 0x0c1ce7f1, + 0xf006e4b6, + 0xefd001f7, + 0xf8e0fc00, +/* 0x0309: init */ + 0xfe04bd00, + 0x07fe0004, + 0x0017f100, + 0x0227f012, + 0xf10012d0, + 0xfe05b917, + 0x17f10010, + 0x10d00400, + 0x0437f1c0, + 0x0634b604, + 0x200327f1, + 0xf10032d0, + 0xd0200427, + 0x27f10132, + 0x32d0200b, + 0x0c27f102, + 0x0732d020, + 0x0c2427f1, + 0xb90624b6, + 0x23d00003, + 0x0427f100, + 0x0023f087, + 0xb70012d0, + 0xf0010012, + 0x12d00427, + 0x1031f400, + 0x9604e7f1, + 0xf440e3f0, + 0xf1c76821, + 0x01018090, + 0x801ff4f0, + 0x17f0000f, 
+ 0x041fbb01, + 0xf10112b6, + 0xb6040c27, + 0x21d00624, + 0x4021d000, + 0x080027f1, + 0xcf0624b6, + 0xf7f00022, +/* 0x03a9: init_find_chipset */ + 0x08f0b654, + 0xb800f398, + 0x0bf40432, + 0x0034b00b, + 0xf8f11bf4, +/* 0x03bd: init_context */ + 0x0017f100, + 0x02fe5801, + 0xf003ff58, + 0x0e8000e3, + 0x150f8014, + 0x013d21f5, + 0x070037f1, + 0x950634b6, + 0x34d00814, + 0x4034d000, + 0x130030b7, + 0xb6001fbb, + 0x3fd002f5, + 0x0815b600, + 0xb60110b6, + 0x1fb90814, + 0x6321f502, + 0x001fbb02, + 0xf1000398, + 0xf0200047, +/* 0x040e: init_gpc */ + 0x4ea05043, + 0x1fb90804, + 0x8d21f402, + 0x08004ea0, + 0xf4022fb9, + 0x4ea08d21, + 0xf4bd010c, + 0xa08d21f4, + 0xf401044e, + 0x4ea08d21, + 0xf7f00100, + 0x8d21f402, + 0x08004ea0, +/* 0x0440: init_gpc_wait */ + 0xc86821f4, + 0x0bf41fff, + 0x044ea0fa, + 0x6821f408, + 0xb7001fbb, + 0xb6800040, + 0x1bf40132, + 0x0027f1b4, + 0x0624b608, + 0xb74021d0, + 0xbd080020, + 0x1f19f014, +/* 0x0473: main */ + 0xf40021d0, + 0x28f40031, + 0x08d7f000, + 0xf43921f4, + 0xe4b1f401, + 0x1bf54001, + 0x87f100d1, + 0x84b6083c, + 0xf094bd06, + 0x89d00499, + 0x0017f100, + 0x0614b60b, + 0xcf4012cf, + 0x13c80011, + 0x7e0bf41f, + 0xf41f23c8, + 0x20f95a0b, + 0xf10212b9, + 0xb6083c87, + 0x94bd0684, + 0xd00799f0, + 0x32f40089, + 0x0231f401, + 0x082921f5, + 0x085c87f1, + 0xbd0684b6, + 0x0799f094, + 0xfc0089d0, + 0x3c87f120, + 0x0684b608, + 0x99f094bd, + 0x0089d006, + 0xf50131f4, + 0xf1082921, + 0xb6085c87, + 0x94bd0684, + 0xd00699f0, + 0x0ef40089, +/* 0x0509: chsw_prev_no_next */ + 0xb920f931, + 0x32f40212, + 0x0232f401, + 0x082921f5, + 0x17f120fc, + 0x14b60b00, + 0x0012d006, +/* 0x0527: chsw_no_prev */ + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0xf50232f4, +/* 0x0537: chsw_done */ + 0xf1082921, + 0xb60b0c17, + 0x27f00614, + 0x0012d001, + 0x085c87f1, + 0xbd0684b6, + 0x0499f094, + 0xf50089d0, +/* 0x0557: main_not_ctx_switch */ + 0xb0ff200e, + 0x1bf401e4, + 0x02f2b90d, + 0x07b521f5, +/* 0x0567: main_not_ctx_chan */ + 0xb0420ef4, + 0x1bf402e4, + 0x3c87f12e, + 0x0684b608, + 0x99f094bd, + 0x0089d007, + 0xf40132f4, + 0x21f50232, + 0x87f10829, + 0x84b6085c, + 0xf094bd06, + 0x89d00799, + 0x110ef400, +/* 0x0598: main_not_ctx_save */ + 0xf010ef94, + 0x21f501f5, + 0x0ef502ec, +/* 0x05a6: main_done */ + 0x17f1fed1, + 0x14b60820, + 0xf024bd06, + 0x12d01f29, + 0xbe0ef500, +/* 0x05b9: ih */ + 0xfe80f9fe, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xc4800acf, + 0x0bf404ab, + 0x00b7f11d, + 0x08d7f019, + 0xcf40becf, + 0x21f400bf, + 0x00b0b704, + 0x01e7f004, +/* 0x05ef: ih_no_fifo */ + 0xe400bed0, + 0xf40100ab, + 0xd7f00d0b, + 0x01e7f108, + 0x0421f440, +/* 0x0600: ih_no_ctxsw */ + 0x0104b7f1, + 0xabffb0bd, + 0x0d0bf4b4, + 0x0c1ca7f1, + 0xd006a4b6, +/* 0x0616: ih_no_other */ + 0x0ad000ab, + 0xfcf0fc40, + 0xfcd0fce0, + 0xfca0fcb0, + 0xfe80fc90, + 0x80fc0088, + 0xf80032f4, +/* 0x0631: ctx_4160s */ + 0x60e7f101, + 0x40e3f041, + 0xf401f7f0, +/* 0x063e: ctx_4160s_wait */ + 0x21f48d21, + 0x04ffc868, + 0xf8fa0bf4, +/* 0x0649: ctx_4160c */ + 0x60e7f100, + 0x40e3f041, + 0x21f4f4bd, +/* 0x0657: ctx_4170s */ + 0xf100f88d, + 0xf04170e7, + 0xf5f040e3, + 0x8d21f410, +/* 0x0666: ctx_4170w */ + 0xe7f100f8, + 0xe3f04170, + 0x6821f440, + 0xf410f4f0, + 0x00f8f31b, +/* 0x0678: ctx_redswitch */ + 0x0614e7f1, + 0xf106e4b6, + 0xd00270f7, + 0xf7f000ef, +/* 0x0689: ctx_redswitch_delay */ + 0x01f2b608, + 0xf1fd1bf4, + 0xd00770f7, + 0x00f800ef, +/* 0x0698: ctx_86c */ + 0x086ce7f1, + 0xd006e4b6, + 0xe7f100ef, + 0xe3f08a14, + 0x8d21f440, + 0xa86ce7f1, + 0xf441e3f0, + 0x00f88d21, +/* 0x06b8: ctx_load */ + 0x083c87f1, 
+ 0xbd0684b6, + 0x0599f094, + 0xf00089d0, + 0x21f40ca7, + 0x2417f1c9, + 0x0614b60a, + 0xf10010d0, + 0xb60b0037, + 0x32d00634, + 0x0c17f140, + 0x0614b60a, + 0xd00747f0, + 0x14d00012, +/* 0x06f1: ctx_chan_wait_0 */ + 0x4014cf40, + 0xf41f44f0, + 0x32d0fa1b, + 0x000bfe00, + 0xb61f2af0, + 0x20b60424, + 0x3c87f102, + 0x0684b608, + 0x99f094bd, + 0x0089d008, + 0x0a0417f1, + 0xd00614b6, + 0x17f10012, + 0x14b60a20, + 0x0227f006, + 0x800023f1, + 0xf00012d0, + 0x27f11017, + 0x23f00300, + 0x0512fa02, + 0x87f103f8, + 0x84b6085c, + 0xf094bd06, + 0x89d00899, + 0xc1019800, + 0x981814b6, + 0x25b6c002, + 0x0512fd08, + 0xf1160180, + 0xb6083c87, + 0x94bd0684, + 0xd00999f0, + 0x27f10089, + 0x24b60a04, + 0x0021d006, + 0xf10127f0, + 0xb60a2017, + 0x12d00614, + 0x0017f100, + 0x0613f002, + 0xf80501fa, + 0x5c87f103, + 0x0684b608, + 0x99f094bd, + 0x0089d009, + 0x085c87f1, + 0xbd0684b6, + 0x0599f094, + 0xf80089d0, +/* 0x07b5: ctx_chan */ + 0x3121f500, + 0xb821f506, + 0x0ca7f006, + 0xf1c921f4, + 0xb60a1017, + 0x27f00614, + 0x0012d005, +/* 0x07d0: ctx_chan_wait */ + 0xfd0012cf, + 0x1bf40522, + 0x4921f5fa, +/* 0x07df: ctx_mmio_exec */ + 0x9800f806, + 0x27f18103, + 0x24b60a04, + 0x0023d006, +/* 0x07ee: ctx_mmio_loop */ + 0x34c434bd, + 0x0f1bf4ff, + 0x030057f1, + 0xfa0653f0, + 0x03f80535, +/* 0x0800: ctx_mmio_pull */ + 0x98c04e98, + 0x21f4c14f, + 0x0830b68d, + 0xf40112b6, +/* 0x0812: ctx_mmio_done */ + 0x0398df1b, + 0x0023d016, + 0xf1800080, + 0xf0020017, + 0x01fa0613, + 0xf803f806, +/* 0x0829: ctx_xfer */ + 0x00f7f100, + 0x06f4b60c, + 0xd004e7f0, +/* 0x0836: ctx_xfer_idle */ + 0xfecf80fe, + 0x00e4f100, + 0xf91bf420, + 0xf40611f4, +/* 0x0846: ctx_xfer_pre */ + 0xf7f01102, + 0x9821f510, + 0x3121f506, + 0x1c11f406, +/* 0x0854: ctx_xfer_pre_load */ + 0xf502f7f0, + 0xf5065721, + 0xf5066621, + 0xbd067821, + 0x5721f5f4, + 0xb821f506, +/* 0x086d: ctx_xfer_exec */ + 0x16019806, + 0x041427f1, + 0xd00624b6, + 0xe7f10020, + 0xe3f0a500, + 0x021fb941, + 0xb68d21f4, + 0xfcf004e0, + 0x022cf001, + 0xfd0124b6, + 0x21f405f2, + 0xfc17f18d, + 0x0213f04a, + 0xd00c27f0, + 0x21f50012, + 0x27f10207, + 0x23f047fc, + 0x0020d002, + 0xb6012cf0, + 0x12d00320, + 0x01acf000, + 0xf006a5f0, + 0x0c9800b7, + 0x150d9814, + 0xf500e7f0, + 0xf0015c21, + 0x21f508a7, + 0x21f50103, + 0x01f40207, + 0x0ca7f022, + 0xf1c921f4, + 0xb60a1017, + 0x27f00614, + 0x0012d005, +/* 0x08f4: ctx_xfer_post_save_wait */ + 0xfd0012cf, + 0x1bf40522, + 0x3202f4fa, +/* 0x0900: ctx_xfer_post */ + 0xf502f7f0, + 0xbd065721, + 0x9821f5f4, + 0x2621f506, + 0x6621f502, + 0xf5f4bd06, + 0xf4065721, + 0x01981011, + 0x0511fd80, + 0xf5070bf4, +/* 0x092b: ctx_xfer_no_post_mmio */ + 0xf507df21, +/* 0x092f: ctx_xfer_done */ + 0xf8064921, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc new file mode 
100644 index 0000000..7fe9d7c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc @@ -0,0 +1,793 @@ +/* fuc microcode for nve0 PGRAPH/HUB + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +/* To build: + * m4 nve0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grhub.fuc.h + */ + +.section #nve0_grhub_data +include(`nve0.fuc') +gpc_count: .b32 0 +rop_count: .b32 0 +cmd_queue: queue_init +hub_mmio_list_head: .b32 0 +hub_mmio_list_tail: .b32 0 + +ctx_current: .b32 0 + +chipsets: +.b8 0xe4 0 0 0 +.b16 #nve4_hub_mmio_head +.b16 #nve4_hub_mmio_tail +.b8 0xe7 0 0 0 +.b16 #nve4_hub_mmio_head +.b16 #nve4_hub_mmio_tail +.b8 0xe6 0 0 0 +.b16 #nve4_hub_mmio_head +.b16 #nve4_hub_mmio_tail +.b8 0 0 0 0 + +nve4_hub_mmio_head: +mmctx_data(0x17e91c, 2) +mmctx_data(0x400204, 2) +mmctx_data(0x404010, 7) +mmctx_data(0x4040a8, 9) +mmctx_data(0x4040d0, 7) +mmctx_data(0x4040f8, 1) +mmctx_data(0x404130, 3) +mmctx_data(0x404150, 3) +mmctx_data(0x404164, 1) +mmctx_data(0x4041a0, 4) +mmctx_data(0x404200, 4) +mmctx_data(0x404404, 14) +mmctx_data(0x404460, 4) +mmctx_data(0x404480, 1) +mmctx_data(0x404498, 1) +mmctx_data(0x404604, 4) +mmctx_data(0x404618, 4) +mmctx_data(0x40462c, 2) +mmctx_data(0x404640, 1) +mmctx_data(0x404654, 1) +mmctx_data(0x404660, 1) +mmctx_data(0x404678, 19) +mmctx_data(0x4046c8, 3) +mmctx_data(0x404700, 3) +mmctx_data(0x404718, 10) +mmctx_data(0x404744, 2) +mmctx_data(0x404754, 1) +mmctx_data(0x405800, 1) +mmctx_data(0x405830, 3) +mmctx_data(0x405854, 1) +mmctx_data(0x405870, 4) +mmctx_data(0x405a00, 2) +mmctx_data(0x405a18, 1) +mmctx_data(0x405b00, 1) +mmctx_data(0x405b10, 1) +mmctx_data(0x406020, 1) +mmctx_data(0x406028, 4) +mmctx_data(0x4064a8, 2) +mmctx_data(0x4064b4, 2) +mmctx_data(0x4064c0, 12) +mmctx_data(0x4064fc, 1) +mmctx_data(0x407040, 1) +mmctx_data(0x407804, 1) +mmctx_data(0x40780c, 6) +mmctx_data(0x4078bc, 1) +mmctx_data(0x408000, 7) +mmctx_data(0x408064, 1) +mmctx_data(0x408800, 3) +mmctx_data(0x408840, 1) +mmctx_data(0x408900, 3) +mmctx_data(0x408980, 1) +nve4_hub_mmio_tail: + +.align 256 +chan_data: +chan_mmio_count: .b32 0 +chan_mmio_address: .b32 0 + +.align 256 +xfer_data: .b32 0 + +.section #nve0_grhub_code +bra #init +define(`include_code') +include(`nve0.fuc') + +// reports an exception to the host +// +// In: $r15 error code (see nve0.fuc) +// +error: + push $r14 + mov $r14 0x814 + shl b32 $r14 6 + iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error 
code + mov $r14 0xc1c + shl b32 $r14 6 + mov $r15 1 + iowr I[$r14 + 0x000] $r15 // INTR_UP_SET + pop $r14 + ret + +// HUB fuc initialisation, executed by triggering ucode start, will +// fall through to main loop after completion. +// +// Input: +// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh) +// +// Output: +// CC_SCRATCH[0]: +// 31:31: set to signal completion +// CC_SCRATCH[1]: +// 31:0: total PGRAPH context size +// +init: + clear b32 $r0 + mov $sp $r0 + mov $xdbase $r0 + + // enable fifo access + mov $r1 0x1200 + mov $r2 2 + iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE + + // setup i0 handler, and route all interrupts to it + mov $r1 #ih + mov $iv0 $r1 + mov $r1 0x400 + iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH + + // route HUB_CHANNEL_SWITCH to fuc interrupt 8 + mov $r3 0x404 + shl b32 $r3 6 + mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8 + iowr I[$r3 + 0x000] $r2 + + // not sure what these are, route them because NVIDIA does, and + // the IRQ handler will signal the host if we ever get one.. we + // may find out if/why we need to handle these if so.. + // + mov $r2 0x2004 + iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9 + mov $r2 0x200b + iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10 + mov $r2 0x200c + iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15 + + // enable all INTR_UP interrupts + mov $r2 0xc24 + shl b32 $r2 6 + not b32 $r3 $r0 + iowr I[$r2] $r3 + + // enable fifo, ctxsw, 9, 10, 15 interrupts + mov $r2 -0x78fc // 0x8704 + sethi $r2 0 + iowr I[$r1 + 0x000] $r2 // INTR_EN_SET + + // fifo level triggered, rest edge + sub b32 $r1 0x100 + mov $r2 4 + iowr I[$r1] $r2 + + // enable interrupts + bset $flags ie0 + + // fetch enabled GPC/ROP counts + mov $r14 -0x69fc // 0x409604 + sethi $r14 0x400000 + call #nv_rd32 + extr $r1 $r15 16:20 + st b32 D[$r0 + #rop_count] $r1 + and $r15 0x1f + st b32 D[$r0 + #gpc_count] $r15 + + // set BAR_REQMASK to GPC mask + mov $r1 1 + shl b32 $r1 $r15 + sub b32 $r1 1 + mov $r2 0x40c + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r1 + iowr I[$r2 + 0x100] $r1 + + // find context data for this chipset + mov $r2 0x800 + shl b32 $r2 6 + iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0] + mov $r15 #chipsets - 8 + init_find_chipset: + add b32 $r15 8 + ld b32 $r3 D[$r15 + 0x00] + cmpu b32 $r3 $r2 + bra e #init_context + cmpu b32 $r3 0 + bra ne #init_find_chipset + // unknown chipset + ret + + // context size calculation, reserve first 256 bytes for use by fuc + init_context: + mov $r1 256 + + // calculate size of mmio context data + ld b16 $r14 D[$r15 + 4] + ld b16 $r15 D[$r15 + 6] + sethi $r14 0 + st b32 D[$r0 + #hub_mmio_list_head] $r14 + st b32 D[$r0 + #hub_mmio_list_tail] $r15 + call #mmctx_size + + // set mmctx base addresses now so we don't have to do it later, + // they don't (currently) ever change + mov $r3 0x700 + shl b32 $r3 6 + shr b32 $r4 $r1 8 + iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE + iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE + add b32 $r3 0x1300 + add b32 $r1 $r15 + shr b32 $r15 2 + iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!? + + // strands, base offset needs to be aligned to 256 bytes + shr b32 $r1 8 + add b32 $r1 1 + shl b32 $r1 8 + mov b32 $r15 $r1 + call #strand_ctx_init + add b32 $r1 $r15 + + // initialise each GPC in sequence by passing in the offset of its + // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which + // has previously been uploaded by the host) running. 
+ // + // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31 + // when it has completed, and return the size of its context data + // in GPCn_CC_SCRATCH[1] + // + ld b32 $r3 D[$r0 + #gpc_count] + mov $r4 0x2000 + sethi $r4 0x500000 + init_gpc: + // setup, and start GPC ucode running + add b32 $r14 $r4 0x804 + mov b32 $r15 $r1 + call #nv_wr32 // CC_SCRATCH[1] = ctx offset + add b32 $r14 $r4 0x800 + mov b32 $r15 $r2 + call #nv_wr32 // CC_SCRATCH[0] = chipset + add b32 $r14 $r4 0x10c + clear b32 $r15 + call #nv_wr32 + add b32 $r14 $r4 0x104 + call #nv_wr32 // ENTRY + add b32 $r14 $r4 0x100 + mov $r15 2 // CTRL_START_TRIGGER + call #nv_wr32 // CTRL + + // wait for it to complete, and adjust context size + add b32 $r14 $r4 0x800 + init_gpc_wait: + call #nv_rd32 + xbit $r15 $r15 31 + bra e #init_gpc_wait + add b32 $r14 $r4 0x804 + call #nv_rd32 + add b32 $r1 $r15 + + // next! + add b32 $r4 0x8000 + sub b32 $r3 1 + bra ne #init_gpc + + // save context size, and tell host we're ready + mov $r2 0x800 + shl b32 $r2 6 + iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size + add b32 $r2 0x800 + clear b32 $r1 + bset $r1 31 + iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000 + +// Main program loop, very simple, sleeps until woken up by the interrupt +// handler, pulls a command from the queue and executes its handler +// +main: + // sleep until we have something to do + bset $flags $p0 + sleep $p0 + mov $r13 #cmd_queue + call #queue_get + bra $p1 #main + + // context switch, requested by GPU? + cmpu b32 $r14 0x4001 + bra ne #main_not_ctx_switch + trace_set(T_AUTO) + mov $r1 0xb00 + shl b32 $r1 6 + iord $r2 I[$r1 + 0x100] // CHAN_NEXT + iord $r1 I[$r1 + 0x000] // CHAN_CUR + + xbit $r3 $r1 31 + bra e #chsw_no_prev + xbit $r3 $r2 31 + bra e #chsw_prev_no_next + push $r2 + mov b32 $r2 $r1 + trace_set(T_SAVE) + bclr $flags $p1 + bset $flags $p2 + call #ctx_xfer + trace_clr(T_SAVE); + pop $r2 + trace_set(T_LOAD); + bset $flags $p1 + call #ctx_xfer + trace_clr(T_LOAD); + bra #chsw_done + chsw_prev_no_next: + push $r2 + mov b32 $r2 $r1 + bclr $flags $p1 + bclr $flags $p2 + call #ctx_xfer + pop $r2 + mov $r1 0xb00 + shl b32 $r1 6 + iowr I[$r1] $r2 + bra #chsw_done + chsw_no_prev: + xbit $r3 $r2 31 + bra e #chsw_done + bset $flags $p1 + bclr $flags $p2 + call #ctx_xfer + + // ack the context switch request + chsw_done: + mov $r1 0xb0c + shl b32 $r1 6 + mov $r2 1 + iowr I[$r1 + 0x000] $r2 // 0x409b0c + trace_clr(T_AUTO) + bra #main + + // request to set current channel? (*not* a context switch) + main_not_ctx_switch: + cmpu b32 $r14 0x0001 + bra ne #main_not_ctx_chan + mov b32 $r2 $r15 + call #ctx_chan + bra #main_done + + // request to store current channel context? + main_not_ctx_chan: + cmpu b32 $r14 0x0002 + bra ne #main_not_ctx_save + trace_set(T_SAVE) + bclr $flags $p1 + bclr $flags $p2 + call #ctx_xfer + trace_clr(T_SAVE) + bra #main_done + + main_not_ctx_save: + shl b32 $r15 $r14 16 + or $r15 E_BAD_COMMAND + call #error + bra #main + + main_done: + mov $r1 0x820 + shl b32 $r1 6 + clear b32 $r2 + bset $r2 31 + iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000 + bra #main + +// interrupt handler +ih: + push $r8 + mov $r8 $flags + push $r8 + push $r9 + push $r10 + push $r11 + push $r13 + push $r14 + push $r15 + + // incoming fifo command? 
+ iord $r10 I[$r0 + 0x200] // INTR + and $r11 $r10 0x00000004 + bra e #ih_no_fifo + // queue incoming fifo command for later processing + mov $r11 0x1900 + mov $r13 #cmd_queue + iord $r14 I[$r11 + 0x100] // FIFO_CMD + iord $r15 I[$r11 + 0x000] // FIFO_DATA + call #queue_put + add b32 $r11 0x400 + mov $r14 1 + iowr I[$r11 + 0x000] $r14 // FIFO_ACK + + // context switch request? + ih_no_fifo: + and $r11 $r10 0x00000100 + bra e #ih_no_ctxsw + // enqueue a context switch for later processing + mov $r13 #cmd_queue + mov $r14 0x4001 + call #queue_put + + // anything we didn't handle, bring it to the host's attention + ih_no_ctxsw: + mov $r11 0x104 + not b32 $r11 + and $r11 $r10 $r11 + bra e #ih_no_other + mov $r10 0xc1c + shl b32 $r10 6 + iowr I[$r10] $r11 // INTR_UP_SET + + // ack, and wake up main() + ih_no_other: + iowr I[$r0 + 0x100] $r10 // INTR_ACK + + pop $r15 + pop $r14 + pop $r13 + pop $r11 + pop $r10 + pop $r9 + pop $r8 + mov $flags $r8 + pop $r8 + bclr $flags $p0 + iret + +// Again, not real sure +// +// In: $r15 value to set 0x404170 to +// +ctx_4170s: + mov $r14 0x4170 + sethi $r14 0x400000 + or $r15 0x10 + call #nv_wr32 + ret + +// Waits for a ctx_4170s() call to complete +// +ctx_4170w: + mov $r14 0x4170 + sethi $r14 0x400000 + call #nv_rd32 + and $r15 0x10 + bra ne #ctx_4170w + ret + +// Disables various things, waits a bit, and re-enables them.. +// +// Not sure how exactly this helps, perhaps "ENABLE" is not such a +// good description for the bits we turn off? Anyways, without this, +// funny things happen. +// +ctx_redswitch: + mov $r14 0x614 + shl b32 $r14 6 + mov $r15 0x270 + iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL + mov $r15 8 + ctx_redswitch_delay: + sub b32 $r15 1 + bra ne #ctx_redswitch_delay + mov $r15 0x770 + iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL + ret + +// Not a clue what this is for, except that unless the value is 0x10, the +// strand context is saved (and presumably restored) incorrectly.. +// +// In: $r15 value to set to (0x00/0x10 are used) +// +ctx_86c: + mov $r14 0x86c + shl b32 $r14 6 + iowr I[$r14] $r15 // HUB(0x86c) = val + mov $r14 -0x75ec + sethi $r14 0x400000 + call #nv_wr32 // ROP(0xa14) = val + mov $r14 -0x5794 + sethi $r14 0x410000 + call #nv_wr32 // GPC(0x86c) = val + ret + +// ctx_load - load's a channel's ctxctl data, and selects its vm +// +// In: $r2 channel address +// +ctx_load: + trace_set(T_CHAN) + + // switch to channel, somewhat magic in parts.. 
+ mov $r10 12 // DONE_UNK12 + call #wait_donez + mov $r1 0xa24 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r0 // 0x409a24 + mov $r3 0xb00 + shl b32 $r3 6 + iowr I[$r3 + 0x100] $r2 // CHAN_NEXT + mov $r1 0xa0c + shl b32 $r1 6 + mov $r4 7 + iowr I[$r1 + 0x000] $r2 // MEM_CHAN + iowr I[$r1 + 0x100] $r4 // MEM_CMD + ctx_chan_wait_0: + iord $r4 I[$r1 + 0x100] + and $r4 0x1f + bra ne #ctx_chan_wait_0 + iowr I[$r3 + 0x000] $r2 // CHAN_CUR + + // load channel header, fetch PGRAPH context pointer + mov $xtargets $r0 + bclr $r2 31 + shl b32 $r2 4 + add b32 $r2 2 + + trace_set(T_LCHAN) + mov $r1 0xa04 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r2 // MEM_BASE + mov $r1 0xa20 + shl b32 $r1 6 + mov $r2 0x0002 + sethi $r2 0x80000000 + iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram + mov $r1 0x10 // chan + 0x0210 + mov $r2 #xfer_data + sethi $r2 0x00020000 // 16 bytes + xdld $r1 $r2 + xdwait + trace_clr(T_LCHAN) + + // update current context + ld b32 $r1 D[$r0 + #xfer_data + 4] + shl b32 $r1 24 + ld b32 $r2 D[$r0 + #xfer_data + 0] + shr b32 $r2 8 + or $r1 $r2 + st b32 D[$r0 + #ctx_current] $r1 + + // set transfer base to start of context, and fetch context header + trace_set(T_LCTXH) + mov $r2 0xa04 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r1 // MEM_BASE + mov $r2 1 + mov $r1 0xa20 + shl b32 $r1 6 + iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm + mov $r1 #chan_data + sethi $r1 0x00060000 // 256 bytes + xdld $r0 $r1 + xdwait + trace_clr(T_LCTXH) + + trace_clr(T_CHAN) + ret + +// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as +// the active channel for ctxctl, but not actually transfer +// any context data. intended for use only during initial +// context construction. +// +// In: $r2 channel address +// +ctx_chan: + call #ctx_load + mov $r10 12 // DONE_UNK12 + call #wait_donez + mov $r1 0xa10 + shl b32 $r1 6 + mov $r2 5 + iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???) + ctx_chan_wait: + iord $r2 I[$r1 + 0x000] + or $r2 $r2 + bra ne #ctx_chan_wait + ret + +// Execute per-context state overrides list +// +// Only executed on the first load of a channel. Might want to look into +// removing this and having the host directly modify the channel's context +// to change this state... The nouveau DRM already builds this list as +// it's definitely needed for NVIDIA's, so we may as well use it for now +// +// Input: $r1 mmio list length +// +ctx_mmio_exec: + // set transfer base to be the mmio list + ld b32 $r3 D[$r0 + #chan_mmio_address] + mov $r2 0xa04 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r3 // MEM_BASE + + clear b32 $r3 + ctx_mmio_loop: + // fetch next 256 bytes of mmio list if necessary + and $r4 $r3 0xff + bra ne #ctx_mmio_pull + mov $r5 #xfer_data + sethi $r5 0x00060000 // 256 bytes + xdld $r3 $r5 + xdwait + + // execute a single list entry + ctx_mmio_pull: + ld b32 $r14 D[$r4 + #xfer_data + 0x00] + ld b32 $r15 D[$r4 + #xfer_data + 0x04] + call #nv_wr32 + + // next! 
+ add b32 $r3 8 + sub b32 $r1 1 + bra ne #ctx_mmio_loop + + // set transfer base back to the current context + ctx_mmio_done: + ld b32 $r3 D[$r0 + #ctx_current] + iowr I[$r2 + 0x000] $r3 // MEM_BASE + + // disable the mmio list now, we don't need/want to execute it again + st b32 D[$r0 + #chan_mmio_count] $r0 + mov $r1 #chan_data + sethi $r1 0x00060000 // 256 bytes + xdst $r0 $r1 + xdwait + ret + +// Transfer HUB context data between GPU and storage area +// +// In: $r2 channel address +// $p1 clear on save, set on load +// $p2 set if opposite direction done/will be done, so: +// on save it means: "a load will follow this save" +// on load it means: "a save preceeded this load" +// +ctx_xfer: + // according to mwk, some kind of wait for idle + mov $r15 0xc00 + shl b32 $r15 6 + mov $r14 4 + iowr I[$r15 + 0x200] $r14 + ctx_xfer_idle: + iord $r14 I[$r15 + 0x000] + and $r14 0x2000 + bra ne #ctx_xfer_idle + + bra not $p1 #ctx_xfer_pre + bra $p2 #ctx_xfer_pre_load + ctx_xfer_pre: + mov $r15 0x10 + call #ctx_86c + bra not $p1 #ctx_xfer_exec + + ctx_xfer_pre_load: + mov $r15 2 + call #ctx_4170s + call #ctx_4170w + call #ctx_redswitch + clear b32 $r15 + call #ctx_4170s + call #ctx_load + + // fetch context pointer, and initiate xfer on all GPCs + ctx_xfer_exec: + ld b32 $r1 D[$r0 + #ctx_current] + mov $r2 0x414 + shl b32 $r2 6 + iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset + mov $r14 -0x5b00 + sethi $r14 0x410000 + mov b32 $r15 $r1 + call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer + add b32 $r14 4 + xbit $r15 $flags $p1 + xbit $r2 $flags $p2 + shl b32 $r2 1 + or $r15 $r2 + call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type) + + // strands + mov $r1 0x4afc + sethi $r1 0x20000 + mov $r2 0xc + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c + call #strand_wait + mov $r2 0x47fc + sethi $r2 0x20000 + iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00 + xbit $r2 $flags $p1 + add b32 $r2 3 + iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD) + + // mmio context + xbit $r10 $flags $p1 // direction + or $r10 6 // first, last + mov $r11 0 // base = 0 + ld b32 $r12 D[$r0 + #hub_mmio_list_head] + ld b32 $r13 D[$r0 + #hub_mmio_list_tail] + mov $r14 0 // not multi + call #mmctx_xfer + + // wait for GPCs to all complete + mov $r10 8 // DONE_BAR + call #wait_doneo + + // wait for strand xfer to complete + call #strand_wait + + // post-op + bra $p1 #ctx_xfer_post + mov $r10 12 // DONE_UNK12 + call #wait_donez + mov $r1 0xa10 + shl b32 $r1 6 + mov $r2 5 + iowr I[$r1] $r2 // MEM_CMD + ctx_xfer_post_save_wait: + iord $r2 I[$r1] + or $r2 $r2 + bra ne #ctx_xfer_post_save_wait + + bra $p2 #ctx_xfer_done + ctx_xfer_post: + mov $r15 2 + call #ctx_4170s + clear b32 $r15 + call #ctx_86c + call #strand_post + call #ctx_4170w + clear b32 $r15 + call #ctx_4170s + + bra not $p1 #ctx_xfer_no_post_mmio + ld b32 $r1 D[$r0 + #chan_mmio_count] + or $r1 $r1 + bra e #ctx_xfer_no_post_mmio + call #ctx_mmio_exec + + ctx_xfer_no_post_mmio: + + ctx_xfer_done: + ret + +.align 256 diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h new file mode 100644 index 0000000..e3421af --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h @@ -0,0 +1,858 @@ +uint32_t nve0_grhub_data[] = { +/* 0x0000: gpc_count */ + 0x00000000, +/* 0x0004: rop_count */ + 0x00000000, +/* 0x0008: cmd_queue */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0050: hub_mmio_list_head */ + 0x00000000, +/* 0x0054: hub_mmio_list_tail */ + 0x00000000, +/* 0x0058: ctx_current */ + 0x00000000, +/* 0x005c: chipsets */ + 0x000000e4, + 0x01440078, + 0x000000e7, + 0x01440078, + 0x000000e6, + 0x01440078, + 0x00000000, +/* 0x0078: nve4_hub_mmio_head */ + 0x0417e91c, + 0x04400204, + 0x18404010, + 0x204040a8, + 0x184040d0, + 0x004040f8, + 0x08404130, + 0x08404150, + 0x00404164, + 0x0c4041a0, + 0x0c404200, + 0x34404404, + 0x0c404460, + 0x00404480, + 0x00404498, + 0x0c404604, + 0x0c404618, + 0x0440462c, + 0x00404640, + 0x00404654, + 0x00404660, + 0x48404678, + 0x084046c8, + 0x08404700, + 0x24404718, + 0x04404744, + 0x00404754, + 0x00405800, + 0x08405830, + 0x00405854, + 0x0c405870, + 0x04405a00, + 0x00405a18, + 0x00405b00, + 0x00405b10, + 0x00406020, + 0x0c406028, + 0x044064a8, + 0x044064b4, + 0x2c4064c0, + 0x004064fc, + 0x00407040, + 0x00407804, + 0x1440780c, + 0x004078bc, + 0x18408000, + 0x00408064, + 0x08408800, + 0x00408840, + 0x08408900, + 0x00408980, +/* 0x0144: nve4_hub_mmio_tail */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0200: chan_data */ +/* 0x0200: chan_mmio_count */ + 0x00000000, +/* 0x0204: chan_mmio_address */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, +/* 0x0300: xfer_data */ + 0x00000000, +}; + +uint32_t nve0_grhub_code[] = { + 0x03090ef5, +/* 0x0004: queue_put */ + 0x9800d898, + 0x86f001d9, + 0x0489b808, + 0xf00c1bf4, + 0x21f502f7, + 0x00f802ec, +/* 0x001c: queue_put_next */ + 0xb60798c4, + 0x8dbb0384, + 0x0880b600, + 0x80008e80, + 0x90b6018f, + 0x0f94f001, + 0xf801d980, +/* 0x0039: queue_get */ + 0x0131f400, + 0x9800d898, + 0x89b801d9, + 0x210bf404, + 0xb60789c4, + 0x9dbb0394, + 0x0890b600, + 0x98009e98, + 0x80b6019f, + 0x0f84f001, + 0xf400d880, +/* 0x0066: queue_get_done */ + 0x00f80132, +/* 0x0068: nv_rd32 */ + 0x0728b7f1, + 0xb906b4b6, + 0xc9f002ec, + 0x00bcd01f, +/* 0x0078: nv_rd32_wait */ + 0xc800bccf, + 0x1bf41fcc, + 0x06a7f0fa, + 0x010321f5, + 0xf840bfcf, +/* 0x008d: nv_wr32 */ + 
0x28b7f100, + 0x06b4b607, + 0xb980bfd0, + 0xc9f002ec, + 0x1ec9f01f, +/* 0x00a3: nv_wr32_wait */ + 0xcf00bcd0, + 0xccc800bc, + 0xfa1bf41f, +/* 0x00ae: watchdog_reset */ + 0x87f100f8, + 0x84b60430, + 0x1ff9f006, + 0xf8008fd0, +/* 0x00bd: watchdog_clear */ + 0x3087f100, + 0x0684b604, + 0xf80080d0, +/* 0x00c9: wait_donez */ + 0x3c87f100, + 0x0684b608, + 0x99f094bd, + 0x0089d000, + 0x081887f1, + 0xd00684b6, +/* 0x00e2: wait_done_wait_donez */ + 0x87f1008a, + 0x84b60400, + 0x0088cf06, + 0xf4888aff, + 0x87f1f31b, + 0x84b6085c, + 0xf094bd06, + 0x89d00099, +/* 0x0103: wait_doneo */ + 0xf100f800, + 0xb6083c87, + 0x94bd0684, + 0xd00099f0, + 0x87f10089, + 0x84b60818, + 0x008ad006, +/* 0x011c: wait_done_wait_doneo */ + 0x040087f1, + 0xcf0684b6, + 0x8aff0088, + 0xf30bf488, + 0x085c87f1, + 0xbd0684b6, + 0x0099f094, + 0xf80089d0, +/* 0x013d: mmctx_size */ +/* 0x013f: nv_mmctx_size_loop */ + 0x9894bd00, + 0x85b600e8, + 0x0180b61a, + 0xbb0284b6, + 0xe0b60098, + 0x04efb804, + 0xb9eb1bf4, + 0x00f8029f, +/* 0x015c: mmctx_xfer */ + 0x083c87f1, + 0xbd0684b6, + 0x0199f094, + 0xf10089d0, + 0xb6071087, + 0x94bd0684, + 0xf405bbfd, + 0x8bd0090b, + 0x0099f000, +/* 0x0180: mmctx_base_disabled */ + 0xf405eefd, + 0x8ed00c0b, + 0xc08fd080, +/* 0x018f: mmctx_multi_disabled */ + 0xb70199f0, + 0xc8010080, + 0xb4b600ab, + 0x0cb9f010, + 0xb601aec8, + 0xbefd11e4, + 0x008bd005, +/* 0x01a8: mmctx_exec_loop */ +/* 0x01a8: mmctx_wait_free */ + 0xf0008ecf, + 0x0bf41fe4, + 0x00ce98fa, + 0xd005e9fd, + 0xc0b6c08e, + 0x04cdb804, + 0xc8e81bf4, + 0x1bf402ab, +/* 0x01c9: mmctx_fini_wait */ + 0x008bcf18, + 0xb01fb4f0, + 0x1bf410b4, + 0x02a7f0f7, + 0xf4c921f4, +/* 0x01de: mmctx_stop */ + 0xabc81b0e, + 0x10b4b600, + 0xf00cb9f0, + 0x8bd012b9, +/* 0x01ed: mmctx_stop_wait */ + 0x008bcf00, + 0xf412bbc8, +/* 0x01f6: mmctx_done */ + 0x87f1fa1b, + 0x84b6085c, + 0xf094bd06, + 0x89d00199, +/* 0x0207: strand_wait */ + 0xf900f800, + 0x02a7f0a0, + 0xfcc921f4, +/* 0x0213: strand_pre */ + 0xf100f8a0, + 0xf04afc87, + 0x97f00283, + 0x0089d00c, + 0x020721f5, +/* 0x0226: strand_post */ + 0x87f100f8, + 0x83f04afc, + 0x0d97f002, + 0xf50089d0, + 0xf8020721, +/* 0x0239: strand_set */ + 0xfca7f100, + 0x02a3f04f, + 0x0500aba2, + 0xd00fc7f0, + 0xc7f000ac, + 0x00bcd00b, + 0x020721f5, + 0xf000aed0, + 0xbcd00ac7, + 0x0721f500, +/* 0x0263: strand_ctx_init */ + 0xf100f802, + 0xb6083c87, + 0x94bd0684, + 0xd00399f0, + 0x21f50089, + 0xe7f00213, + 0x3921f503, + 0xfca7f102, + 0x02a3f046, + 0x0400aba0, + 0xf040a0d0, + 0xbcd001c7, + 0x0721f500, + 0x010c9202, + 0xf000acd0, + 0xbcd002c7, + 0x0721f500, + 0x2621f502, + 0x8087f102, + 0x0684b608, + 0xb70089cf, + 0x95220080, +/* 0x02ba: ctx_init_strand_loop */ + 0x8ed008fe, + 0x408ed000, + 0xb6808acf, + 0xa0b606a5, + 0x00eabb01, + 0xb60480b6, + 0x1bf40192, + 0x08e4b6e8, + 0xf1f2efbc, + 0xb6085c87, + 0x94bd0684, + 0xd00399f0, + 0x00f80089, +/* 0x02ec: error */ + 0xe7f1e0f9, + 0xe4b60814, + 0x00efd006, + 0x0c1ce7f1, + 0xf006e4b6, + 0xefd001f7, + 0xf8e0fc00, +/* 0x0309: init */ + 0xfe04bd00, + 0x07fe0004, + 0x0017f100, + 0x0227f012, + 0xf10012d0, + 0xfe05b917, + 0x17f10010, + 0x10d00400, + 0x0437f1c0, + 0x0634b604, + 0x200327f1, + 0xf10032d0, + 0xd0200427, + 0x27f10132, + 0x32d0200b, + 0x0c27f102, + 0x0732d020, + 0x0c2427f1, + 0xb90624b6, + 0x23d00003, + 0x0427f100, + 0x0023f087, + 0xb70012d0, + 0xf0010012, + 0x12d00427, + 0x1031f400, + 0x9604e7f1, + 0xf440e3f0, + 0xf1c76821, + 0x01018090, + 0x801ff4f0, + 0x17f0000f, + 0x041fbb01, + 0xf10112b6, + 0xb6040c27, + 0x21d00624, + 0x4021d000, + 0x080027f1, + 0xcf0624b6, + 0xf7f00022, +/* 0x03a9: 
init_find_chipset */ + 0x08f0b654, + 0xb800f398, + 0x0bf40432, + 0x0034b00b, + 0xf8f11bf4, +/* 0x03bd: init_context */ + 0x0017f100, + 0x02fe5801, + 0xf003ff58, + 0x0e8000e3, + 0x150f8014, + 0x013d21f5, + 0x070037f1, + 0x950634b6, + 0x34d00814, + 0x4034d000, + 0x130030b7, + 0xb6001fbb, + 0x3fd002f5, + 0x0815b600, + 0xb60110b6, + 0x1fb90814, + 0x6321f502, + 0x001fbb02, + 0xf1000398, + 0xf0200047, +/* 0x040e: init_gpc */ + 0x4ea05043, + 0x1fb90804, + 0x8d21f402, + 0x08004ea0, + 0xf4022fb9, + 0x4ea08d21, + 0xf4bd010c, + 0xa08d21f4, + 0xf401044e, + 0x4ea08d21, + 0xf7f00100, + 0x8d21f402, + 0x08004ea0, +/* 0x0440: init_gpc_wait */ + 0xc86821f4, + 0x0bf41fff, + 0x044ea0fa, + 0x6821f408, + 0xb7001fbb, + 0xb6800040, + 0x1bf40132, + 0x0027f1b4, + 0x0624b608, + 0xb74021d0, + 0xbd080020, + 0x1f19f014, +/* 0x0473: main */ + 0xf40021d0, + 0x28f40031, + 0x08d7f000, + 0xf43921f4, + 0xe4b1f401, + 0x1bf54001, + 0x87f100d1, + 0x84b6083c, + 0xf094bd06, + 0x89d00499, + 0x0017f100, + 0x0614b60b, + 0xcf4012cf, + 0x13c80011, + 0x7e0bf41f, + 0xf41f23c8, + 0x20f95a0b, + 0xf10212b9, + 0xb6083c87, + 0x94bd0684, + 0xd00799f0, + 0x32f40089, + 0x0231f401, + 0x07fb21f5, + 0x085c87f1, + 0xbd0684b6, + 0x0799f094, + 0xfc0089d0, + 0x3c87f120, + 0x0684b608, + 0x99f094bd, + 0x0089d006, + 0xf50131f4, + 0xf107fb21, + 0xb6085c87, + 0x94bd0684, + 0xd00699f0, + 0x0ef40089, +/* 0x0509: chsw_prev_no_next */ + 0xb920f931, + 0x32f40212, + 0x0232f401, + 0x07fb21f5, + 0x17f120fc, + 0x14b60b00, + 0x0012d006, +/* 0x0527: chsw_no_prev */ + 0xc8130ef4, + 0x0bf41f23, + 0x0131f40d, + 0xf50232f4, +/* 0x0537: chsw_done */ + 0xf107fb21, + 0xb60b0c17, + 0x27f00614, + 0x0012d001, + 0x085c87f1, + 0xbd0684b6, + 0x0499f094, + 0xf50089d0, +/* 0x0557: main_not_ctx_switch */ + 0xb0ff200e, + 0x1bf401e4, + 0x02f2b90d, + 0x078f21f5, +/* 0x0567: main_not_ctx_chan */ + 0xb0420ef4, + 0x1bf402e4, + 0x3c87f12e, + 0x0684b608, + 0x99f094bd, + 0x0089d007, + 0xf40132f4, + 0x21f50232, + 0x87f107fb, + 0x84b6085c, + 0xf094bd06, + 0x89d00799, + 0x110ef400, +/* 0x0598: main_not_ctx_save */ + 0xf010ef94, + 0x21f501f5, + 0x0ef502ec, +/* 0x05a6: main_done */ + 0x17f1fed1, + 0x14b60820, + 0xf024bd06, + 0x12d01f29, + 0xbe0ef500, +/* 0x05b9: ih */ + 0xfe80f9fe, + 0x80f90188, + 0xa0f990f9, + 0xd0f9b0f9, + 0xf0f9e0f9, + 0xc4800acf, + 0x0bf404ab, + 0x00b7f11d, + 0x08d7f019, + 0xcf40becf, + 0x21f400bf, + 0x00b0b704, + 0x01e7f004, +/* 0x05ef: ih_no_fifo */ + 0xe400bed0, + 0xf40100ab, + 0xd7f00d0b, + 0x01e7f108, + 0x0421f440, +/* 0x0600: ih_no_ctxsw */ + 0x0104b7f1, + 0xabffb0bd, + 0x0d0bf4b4, + 0x0c1ca7f1, + 0xd006a4b6, +/* 0x0616: ih_no_other */ + 0x0ad000ab, + 0xfcf0fc40, + 0xfcd0fce0, + 0xfca0fcb0, + 0xfe80fc90, + 0x80fc0088, + 0xf80032f4, +/* 0x0631: ctx_4170s */ + 0x70e7f101, + 0x40e3f041, + 0xf410f5f0, + 0x00f88d21, +/* 0x0640: ctx_4170w */ + 0x4170e7f1, + 0xf440e3f0, + 0xf4f06821, + 0xf31bf410, +/* 0x0652: ctx_redswitch */ + 0xe7f100f8, + 0xe4b60614, + 0x70f7f106, + 0x00efd002, +/* 0x0663: ctx_redswitch_delay */ + 0xb608f7f0, + 0x1bf401f2, + 0x70f7f1fd, + 0x00efd007, +/* 0x0672: ctx_86c */ + 0xe7f100f8, + 0xe4b6086c, + 0x00efd006, + 0x8a14e7f1, + 0xf440e3f0, + 0xe7f18d21, + 0xe3f0a86c, + 0x8d21f441, +/* 0x0692: ctx_load */ + 0x87f100f8, + 0x84b6083c, + 0xf094bd06, + 0x89d00599, + 0x0ca7f000, + 0xf1c921f4, + 0xb60a2417, + 0x10d00614, + 0x0037f100, + 0x0634b60b, + 0xf14032d0, + 0xb60a0c17, + 0x47f00614, + 0x0012d007, +/* 0x06cb: ctx_chan_wait_0 */ + 0xcf4014d0, + 0x44f04014, + 0xfa1bf41f, + 0xfe0032d0, + 0x2af0000b, + 0x0424b61f, + 0xf10220b6, + 0xb6083c87, + 0x94bd0684, + 
0xd00899f0, + 0x17f10089, + 0x14b60a04, + 0x0012d006, + 0x0a2017f1, + 0xf00614b6, + 0x23f10227, + 0x12d08000, + 0x1017f000, + 0x030027f1, + 0xfa0223f0, + 0x03f80512, + 0x085c87f1, + 0xbd0684b6, + 0x0899f094, + 0x980089d0, + 0x14b6c101, + 0xc0029818, + 0xfd0825b6, + 0x01800512, + 0x3c87f116, + 0x0684b608, + 0x99f094bd, + 0x0089d009, + 0x0a0427f1, + 0xd00624b6, + 0x27f00021, + 0x2017f101, + 0x0614b60a, + 0xf10012d0, + 0xf0020017, + 0x01fa0613, + 0xf103f805, + 0xb6085c87, + 0x94bd0684, + 0xd00999f0, + 0x87f10089, + 0x84b6085c, + 0xf094bd06, + 0x89d00599, +/* 0x078f: ctx_chan */ + 0xf500f800, + 0xf0069221, + 0x21f40ca7, + 0x1017f1c9, + 0x0614b60a, + 0xd00527f0, +/* 0x07a6: ctx_chan_wait */ + 0x12cf0012, + 0x0522fd00, + 0xf8fa1bf4, +/* 0x07b1: ctx_mmio_exec */ + 0x81039800, + 0x0a0427f1, + 0xd00624b6, + 0x34bd0023, +/* 0x07c0: ctx_mmio_loop */ + 0xf4ff34c4, + 0x57f10f1b, + 0x53f00300, + 0x0535fa06, +/* 0x07d2: ctx_mmio_pull */ + 0x4e9803f8, + 0xc14f98c0, + 0xb68d21f4, + 0x12b60830, + 0xdf1bf401, +/* 0x07e4: ctx_mmio_done */ + 0xd0160398, + 0x00800023, + 0x0017f180, + 0x0613f002, + 0xf80601fa, +/* 0x07fb: ctx_xfer */ + 0xf100f803, + 0xb60c00f7, + 0xe7f006f4, + 0x80fed004, +/* 0x0808: ctx_xfer_idle */ + 0xf100fecf, + 0xf42000e4, + 0x11f4f91b, + 0x0d02f406, +/* 0x0818: ctx_xfer_pre */ + 0xf510f7f0, + 0xf4067221, +/* 0x0822: ctx_xfer_pre_load */ + 0xf7f01c11, + 0x3121f502, + 0x4021f506, + 0x5221f506, + 0xf5f4bd06, + 0xf5063121, +/* 0x083b: ctx_xfer_exec */ + 0x98069221, + 0x27f11601, + 0x24b60414, + 0x0020d006, + 0xa500e7f1, + 0xb941e3f0, + 0x21f4021f, + 0x04e0b68d, + 0xf001fcf0, + 0x24b6022c, + 0x05f2fd01, + 0xf18d21f4, + 0xf04afc17, + 0x27f00213, + 0x0012d00c, + 0x020721f5, + 0x47fc27f1, + 0xd00223f0, + 0x2cf00020, + 0x0320b601, + 0xf00012d0, + 0xa5f001ac, + 0x00b7f006, + 0x98140c98, + 0xe7f0150d, + 0x5c21f500, + 0x08a7f001, + 0x010321f5, + 0x020721f5, + 0xf02201f4, + 0x21f40ca7, + 0x1017f1c9, + 0x0614b60a, + 0xd00527f0, +/* 0x08c2: ctx_xfer_post_save_wait */ + 0x12cf0012, + 0x0522fd00, + 0xf4fa1bf4, +/* 0x08ce: ctx_xfer_post */ + 0xf7f02e02, + 0x3121f502, + 0xf5f4bd06, + 0xf5067221, + 0xf5022621, + 0xbd064021, + 0x3121f5f4, + 0x1011f406, + 0xfd800198, + 0x0bf40511, + 0xb121f507, +/* 0x08f9: ctx_xfer_no_post_mmio */ +/* 0x08f9: ctx_xfer_done */ + 0x0000f807, + 0x00000000, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc new file mode 100644 index 0000000..e6b2288 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc @@ -0,0 +1,400 @@ +/* fuc microcode util functions for nvc0 PGRAPH + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)') +define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))') + +ifdef(`include_code', ` +// Error codes +define(`E_BAD_COMMAND', 0x01) +define(`E_CMD_OVERFLOW', 0x02) + +// Util macros to help with debugging ucode hangs etc +define(`T_WAIT', 0) +define(`T_MMCTX', 1) +define(`T_STRWAIT', 2) +define(`T_STRINIT', 3) +define(`T_AUTO', 4) +define(`T_CHAN', 5) +define(`T_LOAD', 6) +define(`T_SAVE', 7) +define(`T_LCHAN', 8) +define(`T_LCTXH', 9) + +define(`trace_set', ` + mov $r8 0x83c + shl b32 $r8 6 + clear b32 $r9 + bset $r9 $1 + iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] +') + +define(`trace_clr', ` + mov $r8 0x85c + shl b32 $r8 6 + clear b32 $r9 + bset $r9 $1 + iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] +') + +// queue_put - add request to queue +// +// In : $r13 queue pointer +// $r14 command +// $r15 data +// +queue_put: + // make sure we have space.. + ld b32 $r8 D[$r13 + 0x0] // GET + ld b32 $r9 D[$r13 + 0x4] // PUT + xor $r8 8 + cmpu b32 $r8 $r9 + bra ne #queue_put_next + mov $r15 E_CMD_OVERFLOW + call #error + ret + + // store cmd/data on queue + queue_put_next: + and $r8 $r9 7 + shl b32 $r8 3 + add b32 $r8 $r13 + add b32 $r8 8 + st b32 D[$r8 + 0x0] $r14 + st b32 D[$r8 + 0x4] $r15 + + // update PUT + add b32 $r9 1 + and $r9 0xf + st b32 D[$r13 + 0x4] $r9 + ret + +// queue_get - fetch request from queue +// +// In : $r13 queue pointer +// +// Out: $p1 clear on success (data available) +// $r14 command +// $r15 data +// +queue_get: + bset $flags $p1 + ld b32 $r8 D[$r13 + 0x0] // GET + ld b32 $r9 D[$r13 + 0x4] // PUT + cmpu b32 $r8 $r9 + bra e #queue_get_done + // fetch first cmd/data pair + and $r9 $r8 7 + shl b32 $r9 3 + add b32 $r9 $r13 + add b32 $r9 8 + ld b32 $r14 D[$r9 + 0x0] + ld b32 $r15 D[$r9 + 0x4] + + // update GET + add b32 $r8 1 + and $r8 0xf + st b32 D[$r13 + 0x0] $r8 + bclr $flags $p1 +queue_get_done: + ret + +// nv_rd32 - read 32-bit value from nv register +// +// In : $r14 register +// Out: $r15 value +// +nv_rd32: + mov $r11 0x728 + shl b32 $r11 6 + mov b32 $r12 $r14 + bset $r12 31 // MMIO_CTRL_PENDING + iowr I[$r11 + 0x000] $r12 // MMIO_CTRL + nv_rd32_wait: + iord $r12 I[$r11 + 0x000] + xbit $r12 $r12 31 + bra ne #nv_rd32_wait + mov $r10 6 // DONE_MMIO_RD + call #wait_doneo + iord $r15 I[$r11 + 0x100] // MMIO_RDVAL + ret + +// nv_wr32 - write 32-bit value to nv register +// +// In : $r14 register +// $r15 value +// +nv_wr32: + mov $r11 0x728 + shl b32 $r11 6 + iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL + mov b32 $r12 $r14 + bset $r12 31 // MMIO_CTRL_PENDING + bset $r12 30 // MMIO_CTRL_WRITE + iowr I[$r11 + 0x000] $r12 // MMIO_CTRL + nv_wr32_wait: + iord $r12 I[$r11 + 0x000] + xbit $r12 $r12 31 + bra ne #nv_wr32_wait + ret + +// (re)set watchdog timer +// +// In : $r15 timeout +// +watchdog_reset: + mov $r8 0x430 + shl b32 $r8 6 + bset $r15 31 + iowr I[$r8 + 0x000] $r15 + ret + +// clear watchdog timer +watchdog_clear: + mov $r8 0x430 + shl b32 $r8 6 + iowr I[$r8 + 0x000] $r0 + ret + +// wait_done{z,o} - wait on FUC_DONE bit to become clear/set +// +// In : $r10 bit to wait on +// +define(`wait_done', ` +$1: + trace_set(T_WAIT); + mov $r8 0x818 + shl b32 $r8 6 + iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait 
bit + wait_done_$1: + mov $r8 0x400 + shl b32 $r8 6 + iord $r8 I[$r8 + 0x000] // DONE + xbit $r8 $r8 $r10 + bra $2 #wait_done_$1 + trace_clr(T_WAIT) + ret +') +wait_done(wait_donez, ne) +wait_done(wait_doneo, e) + +// mmctx_size - determine size of a mmio list transfer +// +// In : $r14 mmio list head +// $r15 mmio list tail +// Out: $r15 transfer size (in bytes) +// +mmctx_size: + clear b32 $r9 + nv_mmctx_size_loop: + ld b32 $r8 D[$r14] + shr b32 $r8 26 + add b32 $r8 1 + shl b32 $r8 2 + add b32 $r9 $r8 + add b32 $r14 4 + cmpu b32 $r14 $r15 + bra ne #nv_mmctx_size_loop + mov b32 $r15 $r9 + ret + +// mmctx_xfer - execute a list of mmio transfers +// +// In : $r10 flags +// bit 0: direction (0 = save, 1 = load) +// bit 1: set if first transfer +// bit 2: set if last transfer +// $r11 base +// $r12 mmio list head +// $r13 mmio list tail +// $r14 multi_stride +// $r15 multi_mask +// +mmctx_xfer: + trace_set(T_MMCTX) + mov $r8 0x710 + shl b32 $r8 6 + clear b32 $r9 + or $r11 $r11 + bra e #mmctx_base_disabled + iowr I[$r8 + 0x000] $r11 // MMCTX_BASE + bset $r9 0 // BASE_EN + mmctx_base_disabled: + or $r14 $r14 + bra e #mmctx_multi_disabled + iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE + iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK + bset $r9 1 // MULTI_EN + mmctx_multi_disabled: + add b32 $r8 0x100 + + xbit $r11 $r10 0 + shl b32 $r11 16 // DIR + bset $r11 12 // QLIMIT = 0x10 + xbit $r14 $r10 1 + shl b32 $r14 17 + or $r11 $r14 // START_TRIGGER + iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL + + // loop over the mmio list, and send requests to the hw + mmctx_exec_loop: + // wait for space in mmctx queue + mmctx_wait_free: + iord $r14 I[$r8 + 0x000] // MMCTX_CTRL + and $r14 0x1f + bra e #mmctx_wait_free + + // queue up an entry + ld b32 $r14 D[$r12] + or $r14 $r9 + iowr I[$r8 + 0x300] $r14 + add b32 $r12 4 + cmpu b32 $r12 $r13 + bra ne #mmctx_exec_loop + + xbit $r11 $r10 2 + bra ne #mmctx_stop + // wait for queue to empty + mmctx_fini_wait: + iord $r11 I[$r8 + 0x000] // MMCTX_CTRL + and $r11 0x1f + cmpu b32 $r11 0x10 + bra ne #mmctx_fini_wait + mov $r10 2 // DONE_MMCTX + call #wait_donez + bra #mmctx_done + mmctx_stop: + xbit $r11 $r10 0 + shl b32 $r11 16 // DIR + bset $r11 12 // QLIMIT = 0x10 + bset $r11 18 // STOP_TRIGGER + iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL + mmctx_stop_wait: + // wait for STOP_TRIGGER to clear + iord $r11 I[$r8 + 0x000] // MMCTX_CTRL + xbit $r11 $r11 18 + bra ne #mmctx_stop_wait + mmctx_done: + trace_clr(T_MMCTX) + ret + +// Wait for DONE_STRAND +// +strand_wait: + push $r10 + mov $r10 2 + call #wait_donez + pop $r10 + ret + +// unknown - call before issuing strand commands +// +strand_pre: + mov $r8 0x4afc + sethi $r8 0x20000 + mov $r9 0xc + iowr I[$r8] $r9 + call #strand_wait + ret + +// unknown - call after issuing strand commands +// +strand_post: + mov $r8 0x4afc + sethi $r8 0x20000 + mov $r9 0xd + iowr I[$r8] $r9 + call #strand_wait + ret + +// Selects strand set?! +// +// In: $r14 id +// +strand_set: + mov $r10 0x4ffc + sethi $r10 0x20000 + sub b32 $r11 $r10 0x500 + mov $r12 0xf + iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf + mov $r12 0xb + iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb + call #strand_wait + iowr I[$r10 + 0x000] $r14 // 0x93c = <id> + mov $r12 0xa + iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa + call #strand_wait + ret + +// Initialise strand context data +// +// In : $r15 context base +// Out: $r15 context size (in bytes) +// +// Strandset(?) 
3 hardcoded currently +// +strand_ctx_init: + trace_set(T_STRINIT) + call #strand_pre + mov $r14 3 + call #strand_set + mov $r10 0x46fc + sethi $r10 0x20000 + add b32 $r11 $r10 0x400 + iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0 + mov $r12 1 + iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE + call #strand_wait + sub b32 $r12 $r0 1 + iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff + mov $r12 2 + iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT + call #strand_wait + call #strand_post + + // read the size of each strand, poke the context offset of + // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry + // about it later then. + mov $r8 0x880 + shl b32 $r8 6 + iord $r9 I[$r8 + 0x000] // STRANDS + add b32 $r8 0x2200 + shr b32 $r14 $r15 8 + ctx_init_strand_loop: + iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE + iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE + iord $r10 I[$r8 + 0x200] // STRAND_SIZE + shr b32 $r10 6 + add b32 $r10 1 + add b32 $r14 $r10 + add b32 $r8 4 + sub b32 $r9 1 + bra ne #ctx_init_strand_loop + + shl b32 $r14 8 + sub b32 $r15 $r14 $r15 + trace_clr(T_STRINIT) + ret +') diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc new file mode 100644 index 0000000..f16a5d5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc @@ -0,0 +1,400 @@ +/* fuc microcode util functions for nve0 PGRAPH + * + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)') +define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))') + +ifdef(`include_code', ` +// Error codes +define(`E_BAD_COMMAND', 0x01) +define(`E_CMD_OVERFLOW', 0x02) + +// Util macros to help with debugging ucode hangs etc +define(`T_WAIT', 0) +define(`T_MMCTX', 1) +define(`T_STRWAIT', 2) +define(`T_STRINIT', 3) +define(`T_AUTO', 4) +define(`T_CHAN', 5) +define(`T_LOAD', 6) +define(`T_SAVE', 7) +define(`T_LCHAN', 8) +define(`T_LCTXH', 9) + +define(`trace_set', ` + mov $r8 0x83c + shl b32 $r8 6 + clear b32 $r9 + bset $r9 $1 + iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] +') + +define(`trace_clr', ` + mov $r8 0x85c + shl b32 $r8 6 + clear b32 $r9 + bset $r9 $1 + iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7] +') + +// queue_put - add request to queue +// +// In : $r13 queue pointer +// $r14 command +// $r15 data +// +queue_put: + // make sure we have space.. 
+ ld b32 $r8 D[$r13 + 0x0] // GET + ld b32 $r9 D[$r13 + 0x4] // PUT + xor $r8 8 + cmpu b32 $r8 $r9 + bra ne #queue_put_next + mov $r15 E_CMD_OVERFLOW + call #error + ret + + // store cmd/data on queue + queue_put_next: + and $r8 $r9 7 + shl b32 $r8 3 + add b32 $r8 $r13 + add b32 $r8 8 + st b32 D[$r8 + 0x0] $r14 + st b32 D[$r8 + 0x4] $r15 + + // update PUT + add b32 $r9 1 + and $r9 0xf + st b32 D[$r13 + 0x4] $r9 + ret + +// queue_get - fetch request from queue +// +// In : $r13 queue pointer +// +// Out: $p1 clear on success (data available) +// $r14 command +// $r15 data +// +queue_get: + bset $flags $p1 + ld b32 $r8 D[$r13 + 0x0] // GET + ld b32 $r9 D[$r13 + 0x4] // PUT + cmpu b32 $r8 $r9 + bra e #queue_get_done + // fetch first cmd/data pair + and $r9 $r8 7 + shl b32 $r9 3 + add b32 $r9 $r13 + add b32 $r9 8 + ld b32 $r14 D[$r9 + 0x0] + ld b32 $r15 D[$r9 + 0x4] + + // update GET + add b32 $r8 1 + and $r8 0xf + st b32 D[$r13 + 0x0] $r8 + bclr $flags $p1 +queue_get_done: + ret + +// nv_rd32 - read 32-bit value from nv register +// +// In : $r14 register +// Out: $r15 value +// +nv_rd32: + mov $r11 0x728 + shl b32 $r11 6 + mov b32 $r12 $r14 + bset $r12 31 // MMIO_CTRL_PENDING + iowr I[$r11 + 0x000] $r12 // MMIO_CTRL + nv_rd32_wait: + iord $r12 I[$r11 + 0x000] + xbit $r12 $r12 31 + bra ne #nv_rd32_wait + mov $r10 6 // DONE_MMIO_RD + call #wait_doneo + iord $r15 I[$r11 + 0x100] // MMIO_RDVAL + ret + +// nv_wr32 - write 32-bit value to nv register +// +// In : $r14 register +// $r15 value +// +nv_wr32: + mov $r11 0x728 + shl b32 $r11 6 + iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL + mov b32 $r12 $r14 + bset $r12 31 // MMIO_CTRL_PENDING + bset $r12 30 // MMIO_CTRL_WRITE + iowr I[$r11 + 0x000] $r12 // MMIO_CTRL + nv_wr32_wait: + iord $r12 I[$r11 + 0x000] + xbit $r12 $r12 31 + bra ne #nv_wr32_wait + ret + +// (re)set watchdog timer +// +// In : $r15 timeout +// +watchdog_reset: + mov $r8 0x430 + shl b32 $r8 6 + bset $r15 31 + iowr I[$r8 + 0x000] $r15 + ret + +// clear watchdog timer +watchdog_clear: + mov $r8 0x430 + shl b32 $r8 6 + iowr I[$r8 + 0x000] $r0 + ret + +// wait_done{z,o} - wait on FUC_DONE bit to become clear/set +// +// In : $r10 bit to wait on +// +define(`wait_done', ` +$1: + trace_set(T_WAIT); + mov $r8 0x818 + shl b32 $r8 6 + iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit + wait_done_$1: + mov $r8 0x400 + shl b32 $r8 6 + iord $r8 I[$r8 + 0x000] // DONE + xbit $r8 $r8 $r10 + bra $2 #wait_done_$1 + trace_clr(T_WAIT) + ret +') +wait_done(wait_donez, ne) +wait_done(wait_doneo, e) + +// mmctx_size - determine size of a mmio list transfer +// +// In : $r14 mmio list head +// $r15 mmio list tail +// Out: $r15 transfer size (in bytes) +// +mmctx_size: + clear b32 $r9 + nv_mmctx_size_loop: + ld b32 $r8 D[$r14] + shr b32 $r8 26 + add b32 $r8 1 + shl b32 $r8 2 + add b32 $r9 $r8 + add b32 $r14 4 + cmpu b32 $r14 $r15 + bra ne #nv_mmctx_size_loop + mov b32 $r15 $r9 + ret + +// mmctx_xfer - execute a list of mmio transfers +// +// In : $r10 flags +// bit 0: direction (0 = save, 1 = load) +// bit 1: set if first transfer +// bit 2: set if last transfer +// $r11 base +// $r12 mmio list head +// $r13 mmio list tail +// $r14 multi_stride +// $r15 multi_mask +// +mmctx_xfer: + trace_set(T_MMCTX) + mov $r8 0x710 + shl b32 $r8 6 + clear b32 $r9 + or $r11 $r11 + bra e #mmctx_base_disabled + iowr I[$r8 + 0x000] $r11 // MMCTX_BASE + bset $r9 0 // BASE_EN + mmctx_base_disabled: + or $r14 $r14 + bra e #mmctx_multi_disabled + iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE + iowr I[$r8 + 0x300] $r15 // 
MMCTX_MULTI_MASK + bset $r9 1 // MULTI_EN + mmctx_multi_disabled: + add b32 $r8 0x100 + + xbit $r11 $r10 0 + shl b32 $r11 16 // DIR + bset $r11 12 // QLIMIT = 0x10 + xbit $r14 $r10 1 + shl b32 $r14 17 + or $r11 $r14 // START_TRIGGER + iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL + + // loop over the mmio list, and send requests to the hw + mmctx_exec_loop: + // wait for space in mmctx queue + mmctx_wait_free: + iord $r14 I[$r8 + 0x000] // MMCTX_CTRL + and $r14 0x1f + bra e #mmctx_wait_free + + // queue up an entry + ld b32 $r14 D[$r12] + or $r14 $r9 + iowr I[$r8 + 0x300] $r14 + add b32 $r12 4 + cmpu b32 $r12 $r13 + bra ne #mmctx_exec_loop + + xbit $r11 $r10 2 + bra ne #mmctx_stop + // wait for queue to empty + mmctx_fini_wait: + iord $r11 I[$r8 + 0x000] // MMCTX_CTRL + and $r11 0x1f + cmpu b32 $r11 0x10 + bra ne #mmctx_fini_wait + mov $r10 2 // DONE_MMCTX + call #wait_donez + bra #mmctx_done + mmctx_stop: + xbit $r11 $r10 0 + shl b32 $r11 16 // DIR + bset $r11 12 // QLIMIT = 0x10 + bset $r11 18 // STOP_TRIGGER + iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL + mmctx_stop_wait: + // wait for STOP_TRIGGER to clear + iord $r11 I[$r8 + 0x000] // MMCTX_CTRL + xbit $r11 $r11 18 + bra ne #mmctx_stop_wait + mmctx_done: + trace_clr(T_MMCTX) + ret + +// Wait for DONE_STRAND +// +strand_wait: + push $r10 + mov $r10 2 + call #wait_donez + pop $r10 + ret + +// unknown - call before issuing strand commands +// +strand_pre: + mov $r8 0x4afc + sethi $r8 0x20000 + mov $r9 0xc + iowr I[$r8] $r9 + call #strand_wait + ret + +// unknown - call after issuing strand commands +// +strand_post: + mov $r8 0x4afc + sethi $r8 0x20000 + mov $r9 0xd + iowr I[$r8] $r9 + call #strand_wait + ret + +// Selects strand set?! +// +// In: $r14 id +// +strand_set: + mov $r10 0x4ffc + sethi $r10 0x20000 + sub b32 $r11 $r10 0x500 + mov $r12 0xf + iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf + mov $r12 0xb + iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb + call #strand_wait + iowr I[$r10 + 0x000] $r14 // 0x93c = <id> + mov $r12 0xa + iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa + call #strand_wait + ret + +// Initialise strand context data +// +// In : $r15 context base +// Out: $r15 context size (in bytes) +// +// Strandset(?) 3 hardcoded currently +// +strand_ctx_init: + trace_set(T_STRINIT) + call #strand_pre + mov $r14 3 + call #strand_set + mov $r10 0x46fc + sethi $r10 0x20000 + add b32 $r11 $r10 0x400 + iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0 + mov $r12 1 + iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE + call #strand_wait + sub b32 $r12 $r0 1 + iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff + mov $r12 2 + iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT + call #strand_wait + call #strand_post + + // read the size of each strand, poke the context offset of + // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry + // about it later then. 
+ mov $r8 0x880 + shl b32 $r8 6 + iord $r9 I[$r8 + 0x000] // STRANDS + add b32 $r8 0x2200 + shr b32 $r14 $r15 8 + ctx_init_strand_loop: + iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE + iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE + iord $r10 I[$r8 + 0x200] // STRAND_SIZE + shr b32 $r10 6 + add b32 $r10 1 + add b32 $r14 $r10 + add b32 $r8 4 + sub b32 $r9 1 + bra ne #ctx_init_strand_loop + + shl b32 $r14 8 + sub b32 $r15 $r14 $r15 + trace_clr(T_STRINIT) + ret +') diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c new file mode 100644 index 0000000..ad13dcd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c @@ -0,0 +1,1389 @@ +/* + * Copyright 2007 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <core/client.h> +#include <core/os.h> +#include <core/class.h> +#include <core/handle.h> +#include <core/namedb.h> + +#include <subdev/fb.h> +#include <subdev/instmem.h> +#include <subdev/timer.h> + +#include <engine/fifo.h> +#include <engine/graph.h> + +#include "regs.h" + +static u32 +nv04_graph_ctx_regs[] = { + 0x0040053c, + 0x00400544, + 0x00400540, + 0x00400548, + NV04_PGRAPH_CTX_SWITCH1, + NV04_PGRAPH_CTX_SWITCH2, + NV04_PGRAPH_CTX_SWITCH3, + NV04_PGRAPH_CTX_SWITCH4, + NV04_PGRAPH_CTX_CACHE1, + NV04_PGRAPH_CTX_CACHE2, + NV04_PGRAPH_CTX_CACHE3, + NV04_PGRAPH_CTX_CACHE4, + 0x00400184, + 0x004001a4, + 0x004001c4, + 0x004001e4, + 0x00400188, + 0x004001a8, + 0x004001c8, + 0x004001e8, + 0x0040018c, + 0x004001ac, + 0x004001cc, + 0x004001ec, + 0x00400190, + 0x004001b0, + 0x004001d0, + 0x004001f0, + 0x00400194, + 0x004001b4, + 0x004001d4, + 0x004001f4, + 0x00400198, + 0x004001b8, + 0x004001d8, + 0x004001f8, + 0x0040019c, + 0x004001bc, + 0x004001dc, + 0x004001fc, + 0x00400174, + NV04_PGRAPH_DMA_START_0, + NV04_PGRAPH_DMA_START_1, + NV04_PGRAPH_DMA_LENGTH, + NV04_PGRAPH_DMA_MISC, + NV04_PGRAPH_DMA_PITCH, + NV04_PGRAPH_BOFFSET0, + NV04_PGRAPH_BBASE0, + NV04_PGRAPH_BLIMIT0, + NV04_PGRAPH_BOFFSET1, + NV04_PGRAPH_BBASE1, + NV04_PGRAPH_BLIMIT1, + NV04_PGRAPH_BOFFSET2, + NV04_PGRAPH_BBASE2, + NV04_PGRAPH_BLIMIT2, + NV04_PGRAPH_BOFFSET3, + NV04_PGRAPH_BBASE3, + NV04_PGRAPH_BLIMIT3, + NV04_PGRAPH_BOFFSET4, + NV04_PGRAPH_BBASE4, + NV04_PGRAPH_BLIMIT4, + NV04_PGRAPH_BOFFSET5, + NV04_PGRAPH_BBASE5, + NV04_PGRAPH_BLIMIT5, + NV04_PGRAPH_BPITCH0, + NV04_PGRAPH_BPITCH1, + NV04_PGRAPH_BPITCH2, + NV04_PGRAPH_BPITCH3, + NV04_PGRAPH_BPITCH4, + NV04_PGRAPH_SURFACE, + NV04_PGRAPH_STATE, + NV04_PGRAPH_BSWIZZLE2, + NV04_PGRAPH_BSWIZZLE5, + NV04_PGRAPH_BPIXEL, + NV04_PGRAPH_NOTIFY, + NV04_PGRAPH_PATT_COLOR0, + NV04_PGRAPH_PATT_COLOR1, + NV04_PGRAPH_PATT_COLORRAM+0x00, + NV04_PGRAPH_PATT_COLORRAM+0x04, + NV04_PGRAPH_PATT_COLORRAM+0x08, + NV04_PGRAPH_PATT_COLORRAM+0x0c, + NV04_PGRAPH_PATT_COLORRAM+0x10, + NV04_PGRAPH_PATT_COLORRAM+0x14, + NV04_PGRAPH_PATT_COLORRAM+0x18, + NV04_PGRAPH_PATT_COLORRAM+0x1c, + NV04_PGRAPH_PATT_COLORRAM+0x20, + NV04_PGRAPH_PATT_COLORRAM+0x24, + NV04_PGRAPH_PATT_COLORRAM+0x28, + NV04_PGRAPH_PATT_COLORRAM+0x2c, + NV04_PGRAPH_PATT_COLORRAM+0x30, + NV04_PGRAPH_PATT_COLORRAM+0x34, + NV04_PGRAPH_PATT_COLORRAM+0x38, + NV04_PGRAPH_PATT_COLORRAM+0x3c, + NV04_PGRAPH_PATT_COLORRAM+0x40, + NV04_PGRAPH_PATT_COLORRAM+0x44, + NV04_PGRAPH_PATT_COLORRAM+0x48, + NV04_PGRAPH_PATT_COLORRAM+0x4c, + NV04_PGRAPH_PATT_COLORRAM+0x50, + NV04_PGRAPH_PATT_COLORRAM+0x54, + NV04_PGRAPH_PATT_COLORRAM+0x58, + NV04_PGRAPH_PATT_COLORRAM+0x5c, + NV04_PGRAPH_PATT_COLORRAM+0x60, + NV04_PGRAPH_PATT_COLORRAM+0x64, + NV04_PGRAPH_PATT_COLORRAM+0x68, + NV04_PGRAPH_PATT_COLORRAM+0x6c, + NV04_PGRAPH_PATT_COLORRAM+0x70, + NV04_PGRAPH_PATT_COLORRAM+0x74, + NV04_PGRAPH_PATT_COLORRAM+0x78, + NV04_PGRAPH_PATT_COLORRAM+0x7c, + NV04_PGRAPH_PATT_COLORRAM+0x80, + NV04_PGRAPH_PATT_COLORRAM+0x84, + NV04_PGRAPH_PATT_COLORRAM+0x88, + NV04_PGRAPH_PATT_COLORRAM+0x8c, + NV04_PGRAPH_PATT_COLORRAM+0x90, + NV04_PGRAPH_PATT_COLORRAM+0x94, + NV04_PGRAPH_PATT_COLORRAM+0x98, + NV04_PGRAPH_PATT_COLORRAM+0x9c, + NV04_PGRAPH_PATT_COLORRAM+0xa0, + NV04_PGRAPH_PATT_COLORRAM+0xa4, + NV04_PGRAPH_PATT_COLORRAM+0xa8, + NV04_PGRAPH_PATT_COLORRAM+0xac, + NV04_PGRAPH_PATT_COLORRAM+0xb0, + NV04_PGRAPH_PATT_COLORRAM+0xb4, + NV04_PGRAPH_PATT_COLORRAM+0xb8, + NV04_PGRAPH_PATT_COLORRAM+0xbc, + NV04_PGRAPH_PATT_COLORRAM+0xc0, + NV04_PGRAPH_PATT_COLORRAM+0xc4, 
+ NV04_PGRAPH_PATT_COLORRAM+0xc8, + NV04_PGRAPH_PATT_COLORRAM+0xcc, + NV04_PGRAPH_PATT_COLORRAM+0xd0, + NV04_PGRAPH_PATT_COLORRAM+0xd4, + NV04_PGRAPH_PATT_COLORRAM+0xd8, + NV04_PGRAPH_PATT_COLORRAM+0xdc, + NV04_PGRAPH_PATT_COLORRAM+0xe0, + NV04_PGRAPH_PATT_COLORRAM+0xe4, + NV04_PGRAPH_PATT_COLORRAM+0xe8, + NV04_PGRAPH_PATT_COLORRAM+0xec, + NV04_PGRAPH_PATT_COLORRAM+0xf0, + NV04_PGRAPH_PATT_COLORRAM+0xf4, + NV04_PGRAPH_PATT_COLORRAM+0xf8, + NV04_PGRAPH_PATT_COLORRAM+0xfc, + NV04_PGRAPH_PATTERN, + 0x0040080c, + NV04_PGRAPH_PATTERN_SHAPE, + 0x00400600, + NV04_PGRAPH_ROP3, + NV04_PGRAPH_CHROMA, + NV04_PGRAPH_BETA_AND, + NV04_PGRAPH_BETA_PREMULT, + NV04_PGRAPH_CONTROL0, + NV04_PGRAPH_CONTROL1, + NV04_PGRAPH_CONTROL2, + NV04_PGRAPH_BLEND, + NV04_PGRAPH_STORED_FMT, + NV04_PGRAPH_SOURCE_COLOR, + 0x00400560, + 0x00400568, + 0x00400564, + 0x0040056c, + 0x00400400, + 0x00400480, + 0x00400404, + 0x00400484, + 0x00400408, + 0x00400488, + 0x0040040c, + 0x0040048c, + 0x00400410, + 0x00400490, + 0x00400414, + 0x00400494, + 0x00400418, + 0x00400498, + 0x0040041c, + 0x0040049c, + 0x00400420, + 0x004004a0, + 0x00400424, + 0x004004a4, + 0x00400428, + 0x004004a8, + 0x0040042c, + 0x004004ac, + 0x00400430, + 0x004004b0, + 0x00400434, + 0x004004b4, + 0x00400438, + 0x004004b8, + 0x0040043c, + 0x004004bc, + 0x00400440, + 0x004004c0, + 0x00400444, + 0x004004c4, + 0x00400448, + 0x004004c8, + 0x0040044c, + 0x004004cc, + 0x00400450, + 0x004004d0, + 0x00400454, + 0x004004d4, + 0x00400458, + 0x004004d8, + 0x0040045c, + 0x004004dc, + 0x00400460, + 0x004004e0, + 0x00400464, + 0x004004e4, + 0x00400468, + 0x004004e8, + 0x0040046c, + 0x004004ec, + 0x00400470, + 0x004004f0, + 0x00400474, + 0x004004f4, + 0x00400478, + 0x004004f8, + 0x0040047c, + 0x004004fc, + 0x00400534, + 0x00400538, + 0x00400514, + 0x00400518, + 0x0040051c, + 0x00400520, + 0x00400524, + 0x00400528, + 0x0040052c, + 0x00400530, + 0x00400d00, + 0x00400d40, + 0x00400d80, + 0x00400d04, + 0x00400d44, + 0x00400d84, + 0x00400d08, + 0x00400d48, + 0x00400d88, + 0x00400d0c, + 0x00400d4c, + 0x00400d8c, + 0x00400d10, + 0x00400d50, + 0x00400d90, + 0x00400d14, + 0x00400d54, + 0x00400d94, + 0x00400d18, + 0x00400d58, + 0x00400d98, + 0x00400d1c, + 0x00400d5c, + 0x00400d9c, + 0x00400d20, + 0x00400d60, + 0x00400da0, + 0x00400d24, + 0x00400d64, + 0x00400da4, + 0x00400d28, + 0x00400d68, + 0x00400da8, + 0x00400d2c, + 0x00400d6c, + 0x00400dac, + 0x00400d30, + 0x00400d70, + 0x00400db0, + 0x00400d34, + 0x00400d74, + 0x00400db4, + 0x00400d38, + 0x00400d78, + 0x00400db8, + 0x00400d3c, + 0x00400d7c, + 0x00400dbc, + 0x00400590, + 0x00400594, + 0x00400598, + 0x0040059c, + 0x004005a8, + 0x004005ac, + 0x004005b0, + 0x004005b4, + 0x004005c0, + 0x004005c4, + 0x004005c8, + 0x004005cc, + 0x004005d0, + 0x004005d4, + 0x004005d8, + 0x004005dc, + 0x004005e0, + NV04_PGRAPH_PASSTHRU_0, + NV04_PGRAPH_PASSTHRU_1, + NV04_PGRAPH_PASSTHRU_2, + NV04_PGRAPH_DVD_COLORFMT, + NV04_PGRAPH_SCALED_FORMAT, + NV04_PGRAPH_MISC24_0, + NV04_PGRAPH_MISC24_1, + NV04_PGRAPH_MISC24_2, + 0x00400500, + 0x00400504, + NV04_PGRAPH_VALID1, + NV04_PGRAPH_VALID2, + NV04_PGRAPH_DEBUG_3 +}; + +struct nv04_graph_priv { + struct nouveau_graph base; + struct nv04_graph_chan *chan[16]; + spinlock_t lock; +}; + +struct nv04_graph_chan { + struct nouveau_object base; + int chid; + u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; +}; + + +static inline struct nv04_graph_priv * +nv04_graph_priv(struct nv04_graph_chan *chan) +{ + return (void *)nv_object(chan)->engine; +} + 
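/*
 * Illustrative sketch only (not a line from the original diff): the
 * nv04_graph_ctx_regs[] table above, together with the per-channel
 * nv04[] shadow array in struct nv04_graph_chan, holds the entire
 * software copy of the PGRAPH context on these chips.  The sketch
 * below shows how such a table is typically consumed when a channel's
 * context is loaded back into the hardware; nv_wr32()/nv_mask(),
 * ARRAY_SIZE() and NV04_PGRAPH_CTX_USER are helpers/registers already
 * used by this driver, while the function name and the exact trailing
 * register write are assumptions for illustration, not necessarily the
 * helper defined later in this file.
 */
static int
nv04_graph_load_context_sketch(struct nv04_graph_chan *chan, int chid)
{
	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
	int i;

	/* replay the software copy of every context register into PGRAPH */
	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);

	/* tag the hardware context with the owning channel id */
	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	return 0;
}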
+/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +/* + * Software methods, why they are needed, and how they all work: + * + * NV04 and NV05 keep most of the state in PGRAPH context itself, but some + * 2d engine settings are kept inside the grobjs themselves. The grobjs are + * 3 words long on both. grobj format on NV04 is: + * + * word 0: + * - bits 0-7: class + * - bit 12: color key active + * - bit 13: clip rect active + * - bit 14: if set, destination surface is swizzled and taken from buffer 5 + * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken + * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or + * NV03_CONTEXT_SURFACE_DST]. + * - bits 15-17: 2d operation [aka patch config] + * - bit 24: patch valid [enables rendering using this object] + * - bit 25: surf3d valid [for tex_tri and multitex_tri only] + * word 1: + * - bits 0-1: mono format + * - bits 8-13: color format + * - bits 16-31: DMA_NOTIFY instance + * word 2: + * - bits 0-15: DMA_A instance + * - bits 16-31: DMA_B instance + * + * On NV05 it's: + * + * word 0: + * - bits 0-7: class + * - bit 12: color key active + * - bit 13: clip rect active + * - bit 14: if set, destination surface is swizzled and taken from buffer 5 + * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken + * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or + * NV03_CONTEXT_SURFACE_DST]. + * - bits 15-17: 2d operation [aka patch config] + * - bits 20-22: dither mode + * - bit 24: patch valid [enables rendering using this object] + * - bit 25: surface_dst/surface_color/surf2d/surf3d valid + * - bit 26: surface_src/surface_zeta valid + * - bit 27: pattern valid + * - bit 28: rop valid + * - bit 29: beta1 valid + * - bit 30: beta4 valid + * word 1: + * - bits 0-1: mono format + * - bits 8-13: color format + * - bits 16-31: DMA_NOTIFY instance + * word 2: + * - bits 0-15: DMA_A instance + * - bits 16-31: DMA_B instance + * + * NV05 will set/unset the relevant valid bits when you poke the relevant + * object-binding methods with object of the proper type, or with the NULL + * type. It'll only allow rendering using the grobj if all needed objects + * are bound. The needed set of objects depends on selected operation: for + * example rop object is needed by ROP_AND, but not by SRCCOPY_AND. + * + * NV04 doesn't have these methods implemented at all, and doesn't have the + * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24 + * is set. So we have to emulate them in software, internally keeping the + * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04, + * but the last word isn't actually used for anything, we abuse it for this + * purpose. + * + * Actually, NV05 can optionally check bit 24 too, but we disable this since + * there's no use for it. + * + * For unknown reasons, NV04 implements surf3d binding in hardware as an + * exception. Also for unknown reasons, NV04 doesn't implement the clipping + * methods on the surf3d object, so we have to emulate them too. 
+ */ + +static void +nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value) +{ + struct nv04_graph_priv *priv = (void *)object->engine; + int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; + u32 tmp; + + tmp = nv_ro32(object, 0x00); + tmp &= ~mask; + tmp |= value; + nv_wo32(object, 0x00, tmp); + + nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp); + nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); +} + +static void +nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value) +{ + int class, op, valid = 1; + u32 tmp, ctx1; + + ctx1 = nv_ro32(object, 0x00); + class = ctx1 & 0xff; + op = (ctx1 >> 15) & 7; + + tmp = nv_ro32(object, 0x0c); + tmp &= ~mask; + tmp |= value; + nv_wo32(object, 0x0c, tmp); + + /* check for valid surf2d/surf_dst/surf_color */ + if (!(tmp & 0x02000000)) + valid = 0; + /* check for valid surf_src/surf_zeta */ + if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000)) + valid = 0; + + switch (op) { + /* SRCCOPY_AND, SRCCOPY: no extra objects required */ + case 0: + case 3: + break; + /* ROP_AND: requires pattern and rop */ + case 1: + if (!(tmp & 0x18000000)) + valid = 0; + break; + /* BLEND_AND: requires beta1 */ + case 2: + if (!(tmp & 0x20000000)) + valid = 0; + break; + /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */ + case 4: + case 5: + if (!(tmp & 0x40000000)) + valid = 0; + break; + } + + nv04_graph_set_ctx1(object, 0x01000000, valid << 24); +} + +static int +nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + u32 class = nv_ro32(object, 0) & 0xff; + u32 data = *(u32 *)args; + if (data > 5) + return 1; + /* Old versions of the objects only accept first three operations. */ + if (data > 2 && class < 0x40) + return 1; + nv04_graph_set_ctx1(object, 0x00038000, data << 15); + /* changing operation changes set of objects needed for validation */ + nv04_graph_set_ctx_val(object, 0, 0); + return 0; +} + +static int +nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv04_graph_priv *priv = (void *)object->engine; + u32 data = *(u32 *)args; + u32 min = data & 0xffff, max; + u32 w = data >> 16; + if (min & 0x8000) + /* too large */ + return 1; + if (w & 0x8000) + /* yes, it accepts negative for some reason. */ + w |= 0xffff0000; + max = min + w; + max &= 0x3ffff; + nv_wr32(priv, 0x40053c, min); + nv_wr32(priv, 0x400544, max); + return 0; +} + +static int +nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv04_graph_priv *priv = (void *)object->engine; + u32 data = *(u32 *)args; + u32 min = data & 0xffff, max; + u32 w = data >> 16; + if (min & 0x8000) + /* too large */ + return 1; + if (w & 0x8000) + /* yes, it accepts negative for some reason. 
*/ + w |= 0xffff0000; + max = min + w; + max &= 0x3ffff; + nv_wr32(priv, 0x400540, min); + nv_wr32(priv, 0x400548, max); + return 0; +} + +static u16 +nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size) +{ + struct nouveau_instmem *imem = nouveau_instmem(object); + u32 inst = *(u32 *)args << 4; + return nv_ro32(imem, inst); +} + +static int +nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx1(object, 0x00004000, 0); + nv04_graph_set_ctx_val(object, 0x02000000, 0); + return 0; + case 0x42: + nv04_graph_set_ctx1(object, 0x00004000, 0); + nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx1(object, 0x00004000, 0); + nv04_graph_set_ctx_val(object, 0x02000000, 0); + return 0; + case 0x42: + nv04_graph_set_ctx1(object, 0x00004000, 0); + nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000); + return 0; + case 0x52: + nv04_graph_set_ctx1(object, 0x00004000, 0x00004000); + nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x08000000, 0); + return 0; + case 0x18: + nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x08000000, 0); + return 0; + case 0x44: + nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x10000000, 0); + return 0; + case 0x43: + nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x20000000, 0); + return 0; + case 0x12: + nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x40000000, 0); + return 0; + case 0x72: + nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x02000000, 0); + return 0; + case 0x58: + nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd, + void *args, 
u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x04000000, 0); + return 0; + case 0x59: + nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x02000000, 0); + return 0; + case 0x5a: + nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000); + return 0; + } + return 1; +} + +static int +nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx_val(object, 0x04000000, 0); + return 0; + case 0x5b: + nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000); + return 0; + } + return 1; +} + +static int +nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx1(object, 0x2000, 0); + return 0; + case 0x19: + nv04_graph_set_ctx1(object, 0x2000, 0x2000); + return 0; + } + return 1; +} + +static int +nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + switch (nv04_graph_mthd_bind_class(object, args, size)) { + case 0x30: + nv04_graph_set_ctx1(object, 0x1000, 0); + return 0; + /* Yes, for some reason even the old versions of objects + * accept 0x57 and not 0x17. Consistency be damned. + */ + case 0x57: + nv04_graph_set_ctx1(object, 0x1000, 0x1000); + return 0; + } + return 1; +} + +static struct nouveau_omthds +nv03_graph_gdi_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_patt }, + { 0x0188, 0x0188, nv04_graph_mthd_bind_rop }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_gdi_omthds[] = { + { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv01_graph_blit_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, 0x018c, nv01_graph_mthd_bind_patt }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_blit_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_patt }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_iifc_omthds[] = { + { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma }, + { 0x018c, 0x018c, nv01_graph_mthd_bind_clip }, + { 0x0190, 0x0190, 
nv04_graph_mthd_bind_patt }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_rop }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 }, + { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 }, + { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, + { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv01_graph_ifc_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, 0x018c, nv01_graph_mthd_bind_patt }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_ifc_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_patt }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_rop }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 }, + { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv03_graph_sifc_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_sifc_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma }, + { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv03_graph_sifm_omthds[] = { + { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x0304, 0x0304, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_sifm_omthds[] = { + { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x0304, 0x0304, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_surf3d_omthds[] = { + { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h }, + { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v }, + {} +}; + +static struct nouveau_omthds +nv03_graph_ttri_omthds[] = { + { 0x0188, 0x0188, nv01_graph_mthd_bind_clip }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta }, + {} +}; + +static struct nouveau_omthds +nv01_graph_prim_omthds[] = { + { 0x0184, 0x0184, nv01_graph_mthd_bind_clip }, + { 0x0188, 0x0188, nv01_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static struct nouveau_omthds +nv04_graph_prim_omthds[] = { + { 0x0184, 0x0184, 
nv01_graph_mthd_bind_clip }, + { 0x0188, 0x0188, nv04_graph_mthd_bind_patt }, + { 0x018c, 0x018c, nv04_graph_mthd_bind_rop }, + { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 }, + { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 }, + { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d }, + { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation }, + {} +}; + +static int +nv04_graph_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj *obj; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, + 16, 16, 0, &obj); + *pobject = nv_object(obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, nv_mclass(obj)); +#ifdef __BIG_ENDIAN + nv_mo32(obj, 0x00, 0x00080000, 0x00080000); +#endif + nv_wo32(obj, 0x04, 0x00000000); + nv_wo32(obj, 0x08, 0x00000000); + nv_wo32(obj, 0x0c, 0x00000000); + return 0; +} + +struct nouveau_ofuncs +nv04_graph_ofuncs = { + .ctor = nv04_graph_object_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, +}; + +static struct nouveau_oclass +nv04_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs }, /* beta1 */ + { 0x0017, &nv04_graph_ofuncs }, /* chroma */ + { 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */ + { 0x0019, &nv04_graph_ofuncs }, /* clip */ + { 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */ + { 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */ + { 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */ + { 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds }, + { 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds }, + { 0x0030, &nv04_graph_ofuncs }, /* null */ + { 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds }, + { 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds }, + { 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */ + { 0x0039, &nv04_graph_ofuncs }, /* m2mf */ + { 0x0042, &nv04_graph_ofuncs }, /* surf2d */ + { 0x0043, &nv04_graph_ofuncs }, /* rop */ + { 0x0044, &nv04_graph_ofuncs }, /* pattern */ + { 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds }, + { 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds }, + { 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds }, + { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */ + { 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds }, + { 0x0054, &nv04_graph_ofuncs }, /* ttri */ + { 0x0055, &nv04_graph_ofuncs }, /* mtri */ + { 0x0057, &nv04_graph_ofuncs }, /* chroma */ + { 0x0058, &nv04_graph_ofuncs }, /* surf_dst */ + { 0x0059, &nv04_graph_ofuncs }, /* surf_src */ + { 0x005a, &nv04_graph_ofuncs }, /* surf_color */ + { 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */ + { 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */ + { 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */ + { 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */ + { 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds }, + { 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds }, + { 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds }, + { 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */ + { 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */ + { 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */ + { 0x0072, &nv04_graph_ofuncs }, /* beta4 */ + { 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds }, + { 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds }, + {}, +}; + 
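[Editor's illustration — not part of the patch above.] The software-method machinery described in the long comment earlier in this file tracks, per grobj, which helper objects are currently bound and whether the selected 2d operation is satisfied; nv04_graph_set_ctx_val() recomputes the "patch valid" bit (bit 24 of word 0) from those flags. The sketch below restates that decision as a standalone, side-effect-free function over the two software-tracked words, using plain stdint types instead of the driver's object API; the bit names are taken from the comment and from the masks used in nv04_graph_set_ctx_val(), not from any driver header.

    /* Illustrative sketch only -- mirrors the validity rules applied by
     * nv04_graph_set_ctx_val().  ctx1 is grobj word 0 (class in bits 0-7,
     * 2d operation in bits 15-17); ctx_val is the software-tracked word
     * holding the NV05-style "object bound" bits. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CTXVAL_SURF2D   0x02000000u   /* surf_dst / surf_color / surf2d / surf3d */
    #define CTXVAL_SURF_SRC 0x04000000u   /* surf_src / surf_zeta */
    #define CTXVAL_PATTERN  0x08000000u
    #define CTXVAL_ROP      0x10000000u
    #define CTXVAL_BETA1    0x20000000u
    #define CTXVAL_BETA4    0x40000000u

    static bool grobj_would_be_valid(uint32_t ctx1, uint32_t ctx_val)
    {
    	unsigned class = ctx1 & 0xff;
    	unsigned op    = (ctx1 >> 15) & 7;   /* 2d operation, aka patch config */

    	/* a destination surface is always required */
    	if (!(ctx_val & CTXVAL_SURF2D))
    		return false;
    	/* blit (0x1f) and nv03 textured-tri (0x48) also need src/zeta surface */
    	if ((class == 0x1f || class == 0x48) && !(ctx_val & CTXVAL_SURF_SRC))
    		return false;

    	switch (op) {
    	case 1:  /* ROP_AND: driver checks the combined pattern|rop mask (0x18000000) */
    		return (ctx_val & (CTXVAL_PATTERN | CTXVAL_ROP)) != 0;
    	case 2:  /* BLEND_AND: needs beta1 */
    		return (ctx_val & CTXVAL_BETA1) != 0;
    	case 4:  /* SRCCOPY_PREMULT */
    	case 5:  /* BLEND_PREMULT: both need beta4 */
    		return (ctx_val & CTXVAL_BETA4) != 0;
    	default: /* SRCCOPY_AND (0), SRCCOPY (3): no extra objects required */
    		return true;
    	}
    }

    int main(void)
    {
    	/* blit (0x1f) set to ROP_AND (op 1), with dst+src surfaces and a rop bound */
    	uint32_t ctx1    = 0x1f | (1u << 15);
    	uint32_t ctx_val = CTXVAL_SURF2D | CTXVAL_SURF_SRC | CTXVAL_ROP;
    	printf("valid: %d\n", grobj_would_be_valid(ctx1, ctx_val));   /* prints 1 */
    	return 0;
    }

For example, a blit grobj configured for ROP_AND is only reported valid once destination and source surfaces plus a pattern or rop object have been bound, which matches the behaviour the comment attributes to NV05 hardware and that this file emulates for NV04.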
+/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static struct nv04_graph_chan * +nv04_graph_channel(struct nv04_graph_priv *priv) +{ + struct nv04_graph_chan *chan = NULL; + if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) { + int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24; + if (chid < ARRAY_SIZE(priv->chan)) + chan = priv->chan[chid]; + } + return chan; +} + +static int +nv04_graph_load_context(struct nv04_graph_chan *chan, int chid) +{ + struct nv04_graph_priv *priv = nv04_graph_priv(chan); + int i; + + for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) + nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]); + + nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100); + nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24); + nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000); + return 0; +} + +static int +nv04_graph_unload_context(struct nv04_graph_chan *chan) +{ + struct nv04_graph_priv *priv = nv04_graph_priv(chan); + int i; + + for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) + chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]); + + nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000); + nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000); + return 0; +} + +static void +nv04_graph_context_switch(struct nv04_graph_priv *priv) +{ + struct nv04_graph_chan *prev = NULL; + struct nv04_graph_chan *next = NULL; + unsigned long flags; + int chid; + + spin_lock_irqsave(&priv->lock, flags); + nv04_graph_idle(priv); + + /* If previous context is valid, we need to save it */ + prev = nv04_graph_channel(priv); + if (prev) + nv04_graph_unload_context(prev); + + /* load context for next channel */ + chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f; + next = priv->chan[chid]; + if (next) + nv04_graph_load_context(next, chid); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) { + if (nv04_graph_ctx_regs[i] == reg) + return &chan->nv04[i]; + } + + return NULL; +} + +static int +nv04_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_fifo_chan *fifo = (void *)parent; + struct nv04_graph_priv *priv = (void *)engine; + struct nv04_graph_chan *chan; + unsigned long flags; + int ret; + + ret = nouveau_object_create(parent, engine, oclass, 0, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->chan[fifo->chid]) { + *pobject = nv_object(priv->chan[fifo->chid]); + atomic_inc(&(*pobject)->refcount); + spin_unlock_irqrestore(&priv->lock, flags); + nouveau_object_destroy(&chan->base); + return 1; + } + + *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31; + + priv->chan[fifo->chid] = chan; + chan->chid = fifo->chid; + spin_unlock_irqrestore(&priv->lock, flags); + return 0; +} + +static void +nv04_graph_context_dtor(struct nouveau_object *object) +{ + struct nv04_graph_priv *priv = (void *)object->engine; + struct nv04_graph_chan *chan = (void *)object; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + priv->chan[chan->chid] = NULL; + spin_unlock_irqrestore(&priv->lock, flags); + + nouveau_object_destroy(&chan->base); +} + +static int +nv04_graph_context_fini(struct 
nouveau_object *object, bool suspend) +{ + struct nv04_graph_priv *priv = (void *)object->engine; + struct nv04_graph_chan *chan = (void *)object; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); + if (nv04_graph_channel(priv) == chan) + nv04_graph_unload_context(chan); + nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&priv->lock, flags); + + return nouveau_object_fini(&chan->base, suspend); +} + +static struct nouveau_oclass +nv04_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_graph_context_ctor, + .dtor = nv04_graph_context_dtor, + .init = nouveau_object_init, + .fini = nv04_graph_context_fini, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +bool +nv04_graph_idle(void *obj) +{ + struct nouveau_graph *graph = nouveau_graph(obj); + u32 mask = 0xffffffff; + + if (nv_device(obj)->card_type == NV_40) + mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; + + if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) { + nv_error(graph, "idle timed out with status 0x%08x\n", + nv_rd32(graph, NV04_PGRAPH_STATUS)); + return false; + } + + return true; +} + +static const struct nouveau_bitfield +nv04_graph_intr_name[] = { + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, + {} +}; + +static const struct nouveau_bitfield +nv04_graph_nstatus[] = { + { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} +}; + +const struct nouveau_bitfield +nv04_graph_nsource[] = { + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, + {} +}; + +static void +nv04_graph_intr(struct nouveau_subdev *subdev) +{ + struct nv04_graph_priv *priv = (void *)subdev; + struct nv04_graph_chan *chan = NULL; + struct nouveau_namedb *namedb = NULL; + struct nouveau_handle *handle = NULL; + u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); + u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x0f000000) >> 24; + u32 subc = (addr & 0x0000e000) >> 13; + u32 mthd = (addr & 0x00001ffc); + u32 data = 
nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff; + u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4; + u32 show = stat; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + chan = priv->chan[chid]; + if (chan) + namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS); + spin_unlock_irqrestore(&priv->lock, flags); + + if (stat & NV_PGRAPH_INTR_NOTIFY) { + if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) { + handle = nouveau_namedb_get_vinst(namedb, inst); + if (handle && !nv_call(handle->object, mthd, data)) + show &= ~NV_PGRAPH_INTR_NOTIFY; + } + } + + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + nv04_graph_context_switch(priv); + } + + nv_wr32(priv, NV03_PGRAPH_INTR, stat); + nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); + + if (show) { + nv_error(priv, "%s", ""); + nouveau_bitfield_print(nv04_graph_intr_name, show); + pr_cont(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + pr_cont(" nstatus:"); + nouveau_bitfield_print(nv04_graph_nstatus, nstatus); + pr_cont("\n"); + nv_error(priv, + "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, nouveau_client_name(chan), subc, class, mthd, + data); + } + + nouveau_namedb_put(handle); +} + +static int +nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv04_graph_intr; + nv_engine(priv)->cclass = &nv04_graph_cclass; + nv_engine(priv)->sclass = nv04_graph_sclass; + spin_lock_init(&priv->lock); + return 0; +} + +static int +nv04_graph_init(struct nouveau_object *object) +{ + struct nouveau_engine *engine = nv_engine(object); + struct nv04_graph_priv *priv = (void *)engine; + int ret; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + /* Enable PGRAPH interrupts */ + nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF); + nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(priv, NV04_PGRAPH_VALID1, 0); + nv_wr32(priv, NV04_PGRAPH_VALID2, 0); + /*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF); + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000); + /*1231C000 blob, 001 haiku*/ + /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ + nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100); + /*0x72111100 blob , 01 haiku*/ + /*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ + nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071); + /*haiku same*/ + + /*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ + nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); + /*haiku and blob 10d4*/ + + nv_wr32(priv, NV04_PGRAPH_STATE , 0xFFFFFFFF); + nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL , 0x10000100); + nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000); + + /* These don't belong here, they're part of a per-channel context */ + nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); + nv_wr32(priv, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); + return 0; +} + +struct nouveau_oclass +nv04_graph_oclass = { + .handle = NV_ENGINE(GR, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_graph_ctor, + .dtor = 
_nouveau_graph_dtor, + .init = nv04_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c new file mode 100644 index 0000000..23c143a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c @@ -0,0 +1,1316 @@ +/* + * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr> + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <core/client.h> +#include <core/os.h> +#include <core/class.h> +#include <core/handle.h> + +#include <subdev/fb.h> + +#include <engine/fifo.h> +#include <engine/graph.h> + +#include "regs.h" + +struct pipe_state { + u32 pipe_0x0000[0x040/4]; + u32 pipe_0x0040[0x010/4]; + u32 pipe_0x0200[0x0c0/4]; + u32 pipe_0x4400[0x080/4]; + u32 pipe_0x6400[0x3b0/4]; + u32 pipe_0x6800[0x2f0/4]; + u32 pipe_0x6c00[0x030/4]; + u32 pipe_0x7000[0x130/4]; + u32 pipe_0x7400[0x0c0/4]; + u32 pipe_0x7800[0x0c0/4]; +}; + +static int nv10_graph_ctx_regs[] = { + NV10_PGRAPH_CTX_SWITCH(0), + NV10_PGRAPH_CTX_SWITCH(1), + NV10_PGRAPH_CTX_SWITCH(2), + NV10_PGRAPH_CTX_SWITCH(3), + NV10_PGRAPH_CTX_SWITCH(4), + NV10_PGRAPH_CTX_CACHE(0, 0), + NV10_PGRAPH_CTX_CACHE(0, 1), + NV10_PGRAPH_CTX_CACHE(0, 2), + NV10_PGRAPH_CTX_CACHE(0, 3), + NV10_PGRAPH_CTX_CACHE(0, 4), + NV10_PGRAPH_CTX_CACHE(1, 0), + NV10_PGRAPH_CTX_CACHE(1, 1), + NV10_PGRAPH_CTX_CACHE(1, 2), + NV10_PGRAPH_CTX_CACHE(1, 3), + NV10_PGRAPH_CTX_CACHE(1, 4), + NV10_PGRAPH_CTX_CACHE(2, 0), + NV10_PGRAPH_CTX_CACHE(2, 1), + NV10_PGRAPH_CTX_CACHE(2, 2), + NV10_PGRAPH_CTX_CACHE(2, 3), + NV10_PGRAPH_CTX_CACHE(2, 4), + NV10_PGRAPH_CTX_CACHE(3, 0), + NV10_PGRAPH_CTX_CACHE(3, 1), + NV10_PGRAPH_CTX_CACHE(3, 2), + NV10_PGRAPH_CTX_CACHE(3, 3), + NV10_PGRAPH_CTX_CACHE(3, 4), + NV10_PGRAPH_CTX_CACHE(4, 0), + NV10_PGRAPH_CTX_CACHE(4, 1), + NV10_PGRAPH_CTX_CACHE(4, 2), + NV10_PGRAPH_CTX_CACHE(4, 3), + NV10_PGRAPH_CTX_CACHE(4, 4), + NV10_PGRAPH_CTX_CACHE(5, 0), + NV10_PGRAPH_CTX_CACHE(5, 1), + NV10_PGRAPH_CTX_CACHE(5, 2), + NV10_PGRAPH_CTX_CACHE(5, 3), + NV10_PGRAPH_CTX_CACHE(5, 4), + NV10_PGRAPH_CTX_CACHE(6, 0), + NV10_PGRAPH_CTX_CACHE(6, 1), + NV10_PGRAPH_CTX_CACHE(6, 2), + NV10_PGRAPH_CTX_CACHE(6, 3), + NV10_PGRAPH_CTX_CACHE(6, 4), + NV10_PGRAPH_CTX_CACHE(7, 0), + NV10_PGRAPH_CTX_CACHE(7, 1), + NV10_PGRAPH_CTX_CACHE(7, 2), + NV10_PGRAPH_CTX_CACHE(7, 3), + NV10_PGRAPH_CTX_CACHE(7, 4), + NV10_PGRAPH_CTX_USER, + NV04_PGRAPH_DMA_START_0, + 
NV04_PGRAPH_DMA_START_1, + NV04_PGRAPH_DMA_LENGTH, + NV04_PGRAPH_DMA_MISC, + NV10_PGRAPH_DMA_PITCH, + NV04_PGRAPH_BOFFSET0, + NV04_PGRAPH_BBASE0, + NV04_PGRAPH_BLIMIT0, + NV04_PGRAPH_BOFFSET1, + NV04_PGRAPH_BBASE1, + NV04_PGRAPH_BLIMIT1, + NV04_PGRAPH_BOFFSET2, + NV04_PGRAPH_BBASE2, + NV04_PGRAPH_BLIMIT2, + NV04_PGRAPH_BOFFSET3, + NV04_PGRAPH_BBASE3, + NV04_PGRAPH_BLIMIT3, + NV04_PGRAPH_BOFFSET4, + NV04_PGRAPH_BBASE4, + NV04_PGRAPH_BLIMIT4, + NV04_PGRAPH_BOFFSET5, + NV04_PGRAPH_BBASE5, + NV04_PGRAPH_BLIMIT5, + NV04_PGRAPH_BPITCH0, + NV04_PGRAPH_BPITCH1, + NV04_PGRAPH_BPITCH2, + NV04_PGRAPH_BPITCH3, + NV04_PGRAPH_BPITCH4, + NV10_PGRAPH_SURFACE, + NV10_PGRAPH_STATE, + NV04_PGRAPH_BSWIZZLE2, + NV04_PGRAPH_BSWIZZLE5, + NV04_PGRAPH_BPIXEL, + NV10_PGRAPH_NOTIFY, + NV04_PGRAPH_PATT_COLOR0, + NV04_PGRAPH_PATT_COLOR1, + NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */ + 0x00400904, + 0x00400908, + 0x0040090c, + 0x00400910, + 0x00400914, + 0x00400918, + 0x0040091c, + 0x00400920, + 0x00400924, + 0x00400928, + 0x0040092c, + 0x00400930, + 0x00400934, + 0x00400938, + 0x0040093c, + 0x00400940, + 0x00400944, + 0x00400948, + 0x0040094c, + 0x00400950, + 0x00400954, + 0x00400958, + 0x0040095c, + 0x00400960, + 0x00400964, + 0x00400968, + 0x0040096c, + 0x00400970, + 0x00400974, + 0x00400978, + 0x0040097c, + 0x00400980, + 0x00400984, + 0x00400988, + 0x0040098c, + 0x00400990, + 0x00400994, + 0x00400998, + 0x0040099c, + 0x004009a0, + 0x004009a4, + 0x004009a8, + 0x004009ac, + 0x004009b0, + 0x004009b4, + 0x004009b8, + 0x004009bc, + 0x004009c0, + 0x004009c4, + 0x004009c8, + 0x004009cc, + 0x004009d0, + 0x004009d4, + 0x004009d8, + 0x004009dc, + 0x004009e0, + 0x004009e4, + 0x004009e8, + 0x004009ec, + 0x004009f0, + 0x004009f4, + 0x004009f8, + 0x004009fc, + NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */ + 0x0040080c, + NV04_PGRAPH_PATTERN_SHAPE, + NV03_PGRAPH_MONO_COLOR0, + NV04_PGRAPH_ROP3, + NV04_PGRAPH_CHROMA, + NV04_PGRAPH_BETA_AND, + NV04_PGRAPH_BETA_PREMULT, + 0x00400e70, + 0x00400e74, + 0x00400e78, + 0x00400e7c, + 0x00400e80, + 0x00400e84, + 0x00400e88, + 0x00400e8c, + 0x00400ea0, + 0x00400ea4, + 0x00400ea8, + 0x00400e90, + 0x00400e94, + 0x00400e98, + 0x00400e9c, + NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */ + NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */ + 0x00400f04, + 0x00400f24, + 0x00400f08, + 0x00400f28, + 0x00400f0c, + 0x00400f2c, + 0x00400f10, + 0x00400f30, + 0x00400f14, + 0x00400f34, + 0x00400f18, + 0x00400f38, + 0x00400f1c, + 0x00400f3c, + NV10_PGRAPH_XFMODE0, + NV10_PGRAPH_XFMODE1, + NV10_PGRAPH_GLOBALSTATE0, + NV10_PGRAPH_GLOBALSTATE1, + NV04_PGRAPH_STORED_FMT, + NV04_PGRAPH_SOURCE_COLOR, + NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */ + NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */ + 0x00400404, + 0x00400484, + 0x00400408, + 0x00400488, + 0x0040040c, + 0x0040048c, + 0x00400410, + 0x00400490, + 0x00400414, + 0x00400494, + 0x00400418, + 0x00400498, + 0x0040041c, + 0x0040049c, + 0x00400420, + 0x004004a0, + 0x00400424, + 0x004004a4, + 0x00400428, + 0x004004a8, + 0x0040042c, + 0x004004ac, + 0x00400430, + 0x004004b0, + 0x00400434, + 0x004004b4, + 0x00400438, + 0x004004b8, + 0x0040043c, + 0x004004bc, + 0x00400440, + 0x004004c0, + 0x00400444, + 0x004004c4, + 0x00400448, + 0x004004c8, + 0x0040044c, + 0x004004cc, + 0x00400450, + 0x004004d0, + 0x00400454, + 0x004004d4, + 0x00400458, + 0x004004d8, + 0x0040045c, + 0x004004dc, + 0x00400460, + 0x004004e0, + 0x00400464, + 0x004004e4, + 
0x00400468, + 0x004004e8, + 0x0040046c, + 0x004004ec, + 0x00400470, + 0x004004f0, + 0x00400474, + 0x004004f4, + 0x00400478, + 0x004004f8, + 0x0040047c, + 0x004004fc, + NV03_PGRAPH_ABS_UCLIP_XMIN, + NV03_PGRAPH_ABS_UCLIP_XMAX, + NV03_PGRAPH_ABS_UCLIP_YMIN, + NV03_PGRAPH_ABS_UCLIP_YMAX, + 0x00400550, + 0x00400558, + 0x00400554, + 0x0040055c, + NV03_PGRAPH_ABS_UCLIPA_XMIN, + NV03_PGRAPH_ABS_UCLIPA_XMAX, + NV03_PGRAPH_ABS_UCLIPA_YMIN, + NV03_PGRAPH_ABS_UCLIPA_YMAX, + NV03_PGRAPH_ABS_ICLIP_XMAX, + NV03_PGRAPH_ABS_ICLIP_YMAX, + NV03_PGRAPH_XY_LOGIC_MISC0, + NV03_PGRAPH_XY_LOGIC_MISC1, + NV03_PGRAPH_XY_LOGIC_MISC2, + NV03_PGRAPH_XY_LOGIC_MISC3, + NV03_PGRAPH_CLIPX_0, + NV03_PGRAPH_CLIPX_1, + NV03_PGRAPH_CLIPY_0, + NV03_PGRAPH_CLIPY_1, + NV10_PGRAPH_COMBINER0_IN_ALPHA, + NV10_PGRAPH_COMBINER1_IN_ALPHA, + NV10_PGRAPH_COMBINER0_IN_RGB, + NV10_PGRAPH_COMBINER1_IN_RGB, + NV10_PGRAPH_COMBINER_COLOR0, + NV10_PGRAPH_COMBINER_COLOR1, + NV10_PGRAPH_COMBINER0_OUT_ALPHA, + NV10_PGRAPH_COMBINER1_OUT_ALPHA, + NV10_PGRAPH_COMBINER0_OUT_RGB, + NV10_PGRAPH_COMBINER1_OUT_RGB, + NV10_PGRAPH_COMBINER_FINAL0, + NV10_PGRAPH_COMBINER_FINAL1, + 0x00400e00, + 0x00400e04, + 0x00400e08, + 0x00400e0c, + 0x00400e10, + 0x00400e14, + 0x00400e18, + 0x00400e1c, + 0x00400e20, + 0x00400e24, + 0x00400e28, + 0x00400e2c, + 0x00400e30, + 0x00400e34, + 0x00400e38, + 0x00400e3c, + NV04_PGRAPH_PASSTHRU_0, + NV04_PGRAPH_PASSTHRU_1, + NV04_PGRAPH_PASSTHRU_2, + NV10_PGRAPH_DIMX_TEXTURE, + NV10_PGRAPH_WDIMX_TEXTURE, + NV10_PGRAPH_DVD_COLORFMT, + NV10_PGRAPH_SCALED_FORMAT, + NV04_PGRAPH_MISC24_0, + NV04_PGRAPH_MISC24_1, + NV04_PGRAPH_MISC24_2, + NV03_PGRAPH_X_MISC, + NV03_PGRAPH_Y_MISC, + NV04_PGRAPH_VALID1, + NV04_PGRAPH_VALID2, +}; + +static int nv17_graph_ctx_regs[] = { + NV10_PGRAPH_DEBUG_4, + 0x004006b0, + 0x00400eac, + 0x00400eb0, + 0x00400eb4, + 0x00400eb8, + 0x00400ebc, + 0x00400ec0, + 0x00400ec4, + 0x00400ec8, + 0x00400ecc, + 0x00400ed0, + 0x00400ed4, + 0x00400ed8, + 0x00400edc, + 0x00400ee0, + 0x00400a00, + 0x00400a04, +}; + +struct nv10_graph_priv { + struct nouveau_graph base; + struct nv10_graph_chan *chan[32]; + spinlock_t lock; +}; + +struct nv10_graph_chan { + struct nouveau_object base; + int chid; + int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)]; + int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)]; + struct pipe_state pipe_state; + u32 lma_window[4]; +}; + + +static inline struct nv10_graph_priv * +nv10_graph_priv(struct nv10_graph_chan *chan) +{ + return (void *)nv_object(chan)->engine; +} + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +#define PIPE_SAVE(priv, state, addr) \ + do { \ + int __i; \ + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ + state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \ + } while (0) + +#define PIPE_RESTORE(priv, state, addr) \ + do { \ + int __i; \ + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \ + for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \ + } while (0) + +static struct nouveau_oclass +nv10_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs }, /* clip */ + { 0x0030, &nv04_graph_ofuncs }, /* null */ + { 0x0039, &nv04_graph_ofuncs }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs }, /* rop */ + { 0x0044, &nv04_graph_ofuncs }, /* pattern */ + { 0x004a, &nv04_graph_ofuncs }, /* gdi */ + { 0x0052, 
&nv04_graph_ofuncs }, /* swzsurf */ + { 0x005f, &nv04_graph_ofuncs }, /* blit */ + { 0x0062, &nv04_graph_ofuncs }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs }, /* ifc */ + { 0x009f, &nv04_graph_ofuncs }, /* blit */ + { 0x0093, &nv04_graph_ofuncs }, /* surf3d */ + { 0x0094, &nv04_graph_ofuncs }, /* ttri */ + { 0x0095, &nv04_graph_ofuncs }, /* mtri */ + { 0x0056, &nv04_graph_ofuncs }, /* celcius */ + {}, +}; + +static struct nouveau_oclass +nv15_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs }, /* clip */ + { 0x0030, &nv04_graph_ofuncs }, /* null */ + { 0x0039, &nv04_graph_ofuncs }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs }, /* rop */ + { 0x0044, &nv04_graph_ofuncs }, /* pattern */ + { 0x004a, &nv04_graph_ofuncs }, /* gdi */ + { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */ + { 0x005f, &nv04_graph_ofuncs }, /* blit */ + { 0x0062, &nv04_graph_ofuncs }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs }, /* ifc */ + { 0x009f, &nv04_graph_ofuncs }, /* blit */ + { 0x0093, &nv04_graph_ofuncs }, /* surf3d */ + { 0x0094, &nv04_graph_ofuncs }, /* ttri */ + { 0x0095, &nv04_graph_ofuncs }, /* mtri */ + { 0x0096, &nv04_graph_ofuncs }, /* celcius */ + {}, +}; + +static int +nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv10_graph_chan *chan = (void *)object->parent; + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + struct pipe_state *pipe = &chan->pipe_state; + u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; + u32 xfmode0, xfmode1; + u32 data = *(u32 *)args; + int i; + + chan->lma_window[(mthd - 0x1638) / 4] = data; + + if (mthd != 0x1644) + return 0; + + nv04_graph_idle(priv); + + PIPE_SAVE(priv, pipe_0x0040, 0x0040); + PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200); + + PIPE_RESTORE(priv, chan->lma_window, 0x6790); + + nv04_graph_idle(priv); + + xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0); + xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1); + + PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400); + PIPE_SAVE(priv, pipe_0x64c0, 0x64c0); + PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0); + PIPE_SAVE(priv, pipe_0x6a80, 0x6a80); + + nv04_graph_idle(priv); + + nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000); + nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000); + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); + for (i = 0; i < 4; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); + for (i = 0; i < 4; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); + for (i = 0; i < 3; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); + for (i = 0; i < 3; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008); + + PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200); + + nv04_graph_idle(priv); + + PIPE_RESTORE(priv, pipe_0x0040, 0x0040); + + nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0); + nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1); + + PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0); + PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0); + PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80); + PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0); + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 
0x00000000); + + nv04_graph_idle(priv); + + return 0; +} + +static int +nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv10_graph_chan *chan = (void *)object->parent; + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + + nv04_graph_idle(priv); + + nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100); + nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000); + return 0; +} + +static struct nouveau_omthds +nv17_celcius_omthds[] = { + { 0x1638, 0x1638, nv17_graph_mthd_lma_window }, + { 0x163c, 0x163c, nv17_graph_mthd_lma_window }, + { 0x1640, 0x1640, nv17_graph_mthd_lma_window }, + { 0x1644, 0x1644, nv17_graph_mthd_lma_window }, + { 0x1658, 0x1658, nv17_graph_mthd_lma_enable }, + {} +}; + +static struct nouveau_oclass +nv17_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs }, /* clip */ + { 0x0030, &nv04_graph_ofuncs }, /* null */ + { 0x0039, &nv04_graph_ofuncs }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs }, /* rop */ + { 0x0044, &nv04_graph_ofuncs }, /* pattern */ + { 0x004a, &nv04_graph_ofuncs }, /* gdi */ + { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */ + { 0x005f, &nv04_graph_ofuncs }, /* blit */ + { 0x0062, &nv04_graph_ofuncs }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs }, /* ifc */ + { 0x009f, &nv04_graph_ofuncs }, /* blit */ + { 0x0093, &nv04_graph_ofuncs }, /* surf3d */ + { 0x0094, &nv04_graph_ofuncs }, /* ttri */ + { 0x0095, &nv04_graph_ofuncs }, /* mtri */ + { 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds }, + {}, +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static struct nv10_graph_chan * +nv10_graph_channel(struct nv10_graph_priv *priv) +{ + struct nv10_graph_chan *chan = NULL; + if (nv_rd32(priv, 0x400144) & 0x00010000) { + int chid = nv_rd32(priv, 0x400148) >> 24; + if (chid < ARRAY_SIZE(priv->chan)) + chan = priv->chan[chid]; + } + return chan; +} + +static void +nv10_graph_save_pipe(struct nv10_graph_chan *chan) +{ + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + struct pipe_state *pipe = &chan->pipe_state; + + PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400); + PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200); + PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400); + PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800); + PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00); + PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000); + PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400); + PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800); + PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040); + PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000); +} + +static void +nv10_graph_load_pipe(struct nv10_graph_chan *chan) +{ + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + struct pipe_state *pipe = &chan->pipe_state; + u32 xfmode0, xfmode1; + int i; + + nv04_graph_idle(priv); + /* XXX check haiku comments */ + xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0); + xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1); + nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000); + nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000); + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); + for (i = 0; i < 4; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000); + for (i = 0; i < 4; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); + for (i = 0; i < 3; i++) + nv_wr32(priv, 
NV10_PGRAPH_PIPE_DATA, 0x3f800000); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); + for (i = 0; i < 3; i++) + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000); + + nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); + nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008); + + + PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200); + nv04_graph_idle(priv); + + /* restore XFMODE */ + nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0); + nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1); + PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400); + PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800); + PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00); + PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000); + PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400); + PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800); + PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400); + PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000); + PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040); + nv04_graph_idle(priv); +} + +static void +nv10_graph_create_pipe(struct nv10_graph_chan *chan) +{ + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + struct pipe_state *pipe_state = &chan->pipe_state; + u32 *pipe_state_addr; + int i; +#define PIPE_INIT(addr) \ + do { \ + pipe_state_addr = pipe_state->pipe_##addr; \ + } while (0) +#define PIPE_INIT_END(addr) \ + do { \ + u32 *__end_addr = pipe_state->pipe_##addr + \ + ARRAY_SIZE(pipe_state->pipe_##addr); \ + if (pipe_state_addr != __end_addr) \ + nv_error(priv, "incomplete pipe init for 0x%x : %p/%p\n", \ + addr, pipe_state_addr, __end_addr); \ + } while (0) +#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value + + PIPE_INIT(0x0200); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0200); + + PIPE_INIT(0x6400); + for (i = 0; i < 211; i++) + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x40000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x3f000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + NV_WRITE_PIPE_INIT(0x3f800000); + PIPE_INIT_END(0x6400); + + PIPE_INIT(0x6800); + for (i = 0; i < 162; i++) + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x3f800000); + for (i = 0; i < 25; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6800); + + PIPE_INIT(0x6c00); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0xbf800000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x6c00); + + PIPE_INIT(0x7000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + 
NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x00000000); + NV_WRITE_PIPE_INIT(0x7149f2ca); + for (i = 0; i < 35; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7000); + + PIPE_INIT(0x7400); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7400); + + PIPE_INIT(0x7800); + for (i = 0; i < 48; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x7800); + + PIPE_INIT(0x4400); + for (i = 0; i < 32; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x4400); + + PIPE_INIT(0x0000); + for (i = 0; i < 16; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0000); + + PIPE_INIT(0x0040); + for (i = 0; i < 4; i++) + NV_WRITE_PIPE_INIT(0x00000000); + PIPE_INIT_END(0x0040); + +#undef PIPE_INIT +#undef PIPE_INIT_END +#undef NV_WRITE_PIPE_INIT +} + +static int +nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) { + if (nv10_graph_ctx_regs[i] == reg) + return i; + } + nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg); + return -1; +} + +static int +nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) { + if (nv17_graph_ctx_regs[i] == reg) + return i; + } + nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg); + return -1; +} + +static void +nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst) +{ + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4]; + u32 ctx_user, ctx_switch[5]; + int i, subchan = -1; + + /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state + * that cannot be restored via MMIO. Do it through the FIFO + * instead. 
+ */ + + /* Look for a celsius object */ + for (i = 0; i < 8; i++) { + int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff; + + if (class == 0x56 || class == 0x96 || class == 0x99) { + subchan = i; + break; + } + } + + if (subchan < 0 || !inst) + return; + + /* Save the current ctx object */ + ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER); + for (i = 0; i < 5; i++) + ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i)); + + /* Save the FIFO state */ + st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2); + st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL); + st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH); + fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR); + + for (i = 0; i < ARRAY_SIZE(fifo); i++) + fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i); + + /* Switch to the celsius subchannel */ + for (i = 0; i < 5; i++) + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), + nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i))); + nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13); + + /* Inject NV10TCL_DMA_VTXBUF */ + nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0); + nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, + 0x2c000000 | chid << 20 | subchan << 16 | 0x18c); + nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst); + nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); + nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); + nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); + + /* Restore the FIFO state */ + for (i = 0; i < ARRAY_SIZE(fifo); i++) + nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]); + + nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr); + nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2); + nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl); + nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh); + + /* Restore the current ctx object */ + for (i = 0; i < 5; i++) + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]); + nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user); +} + +static int +nv10_graph_load_context(struct nv10_graph_chan *chan, int chid) +{ + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + u32 inst; + int i; + + for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) + nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]); + + if (nv_device(priv)->chipset >= 0x17) { + for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) + nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]); + } + + nv10_graph_load_pipe(chan); + + inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff; + nv10_graph_load_dma_vtxbuf(chan, chid, inst); + + nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100); + nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24); + nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000); + return 0; +} + +static int +nv10_graph_unload_context(struct nv10_graph_chan *chan) +{ + struct nv10_graph_priv *priv = nv10_graph_priv(chan); + int i; + + for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) + chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]); + + if (nv_device(priv)->chipset >= 0x17) { + for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) + chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]); + } + + nv10_graph_save_pipe(chan); + + nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000); + nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000); + return 0; +} + +static void +nv10_graph_context_switch(struct nv10_graph_priv *priv) +{ + struct nv10_graph_chan *prev = NULL; + struct nv10_graph_chan *next = NULL; + unsigned long flags; + int chid; + + spin_lock_irqsave(&priv->lock, flags); + nv04_graph_idle(priv); + + /* If previous context is 
valid, we need to save it */ + prev = nv10_graph_channel(priv); + if (prev) + nv10_graph_unload_context(prev); + + /* load context for next channel */ + chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; + next = priv->chan[chid]; + if (next) + nv10_graph_load_context(next, chid); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +#define NV_WRITE_CTX(reg, val) do { \ + int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \ + if (offset > 0) \ + chan->nv10[offset] = val; \ + } while (0) + +#define NV17_WRITE_CTX(reg, val) do { \ + int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \ + if (offset > 0) \ + chan->nv17[offset] = val; \ + } while (0) + +static int +nv10_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_fifo_chan *fifo = (void *)parent; + struct nv10_graph_priv *priv = (void *)engine; + struct nv10_graph_chan *chan; + unsigned long flags; + int ret; + + ret = nouveau_object_create(parent, engine, oclass, 0, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->chan[fifo->chid]) { + *pobject = nv_object(priv->chan[fifo->chid]); + atomic_inc(&(*pobject)->refcount); + spin_unlock_irqrestore(&priv->lock, flags); + nouveau_object_destroy(&chan->base); + return 1; + } + + NV_WRITE_CTX(0x00400e88, 0x08000000); + NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); + NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); + NV_WRITE_CTX(0x00400e10, 0x00001000); + NV_WRITE_CTX(0x00400e14, 0x00001000); + NV_WRITE_CTX(0x00400e30, 0x00080008); + NV_WRITE_CTX(0x00400e34, 0x00080008); + if (nv_device(priv)->chipset >= 0x17) { + /* is it really needed ??? 
*/ + NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, + nv_rd32(priv, NV10_PGRAPH_DEBUG_4)); + NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0)); + NV17_WRITE_CTX(0x00400eac, 0x0fff0000); + NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); + NV17_WRITE_CTX(0x00400ec0, 0x00000080); + NV17_WRITE_CTX(0x00400ed0, 0x00000080); + } + NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24); + + nv10_graph_create_pipe(chan); + + priv->chan[fifo->chid] = chan; + chan->chid = fifo->chid; + spin_unlock_irqrestore(&priv->lock, flags); + return 0; +} + +static void +nv10_graph_context_dtor(struct nouveau_object *object) +{ + struct nv10_graph_priv *priv = (void *)object->engine; + struct nv10_graph_chan *chan = (void *)object; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + priv->chan[chan->chid] = NULL; + spin_unlock_irqrestore(&priv->lock, flags); + + nouveau_object_destroy(&chan->base); +} + +static int +nv10_graph_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv10_graph_priv *priv = (void *)object->engine; + struct nv10_graph_chan *chan = (void *)object; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000); + if (nv10_graph_channel(priv) == chan) + nv10_graph_unload_context(chan); + nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&priv->lock, flags); + + return nouveau_object_fini(&chan->base, suspend); +} + +static struct nouveau_oclass +nv10_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_graph_context_ctor, + .dtor = nv10_graph_context_dtor, + .init = nouveau_object_init, + .fini = nv10_graph_context_fini, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static void +nv10_graph_tile_prog(struct nouveau_engine *engine, int i) +{ + struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i]; + struct nouveau_fifo *pfifo = nouveau_fifo(engine); + struct nv10_graph_priv *priv = (void *)engine; + unsigned long flags; + + pfifo->pause(pfifo, &flags); + nv04_graph_idle(priv); + + nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr); + + pfifo->start(pfifo, &flags); +} + +const struct nouveau_bitfield nv10_graph_intr_name[] = { + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, + { NV_PGRAPH_INTR_ERROR, "ERROR" }, + {} +}; + +const struct nouveau_bitfield nv10_graph_nstatus[] = { + { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} +}; + +static void +nv10_graph_intr(struct nouveau_subdev *subdev) +{ + struct nv10_graph_priv *priv = (void *)subdev; + struct nv10_graph_chan *chan = NULL; + struct nouveau_namedb *namedb = NULL; + struct nouveau_handle *handle = NULL; + u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); + u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x01f00000) >> 20; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff; + 
u32 show = stat; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + chan = priv->chan[chid]; + if (chan) + namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS); + spin_unlock_irqrestore(&priv->lock, flags); + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) { + handle = nouveau_namedb_get_class(namedb, class); + if (handle && !nv_call(handle->object, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } + } + + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + nv10_graph_context_switch(priv); + } + + nv_wr32(priv, NV03_PGRAPH_INTR, stat); + nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); + + if (show) { + nv_error(priv, "%s", ""); + nouveau_bitfield_print(nv10_graph_intr_name, show); + pr_cont(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + pr_cont(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + pr_cont("\n"); + nv_error(priv, + "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, nouveau_client_name(chan), subc, class, mthd, + data); + } + + nouveau_namedb_put(handle); +} + +static int +nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv10_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv10_graph_intr; + nv_engine(priv)->cclass = &nv10_graph_cclass; + + if (nv_device(priv)->chipset <= 0x10) + nv_engine(priv)->sclass = nv10_graph_sclass; + else + if (nv_device(priv)->chipset < 0x17 || + nv_device(priv)->chipset == 0x1a) + nv_engine(priv)->sclass = nv15_graph_sclass; + else + nv_engine(priv)->sclass = nv17_graph_sclass; + + nv_engine(priv)->tile_prog = nv10_graph_tile_prog; + spin_lock_init(&priv->lock); + return 0; +} + +static void +nv10_graph_dtor(struct nouveau_object *object) +{ + struct nv10_graph_priv *priv = (void *)object; + nouveau_graph_destroy(&priv->base); +} + +static int +nv10_graph_init(struct nouveau_object *object) +{ + struct nouveau_engine *engine = nv_engine(object); + struct nouveau_fb *pfb = nouveau_fb(object); + struct nv10_graph_priv *priv = (void *)engine; + int ret, i; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700); + /* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */ + nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9); + nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31)); + + if (nv_device(priv)->chipset >= 0x17) { + nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000); + nv_wr32(priv, 0x400a10, 0x03ff3fb6); + nv_wr32(priv, 0x400838, 0x002f8684); + nv_wr32(priv, 0x40083c, 0x00115f3f); + nv_wr32(priv, 0x4006b0, 0x40000020); + } else { + nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000); + } + + /* Turn all the tiling regions off. 
*/ + for (i = 0; i < pfb->tile.regions; i++) + engine->tile_prog(engine, i); + + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000); + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000); + nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000); + nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF); + + nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000); + nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); + return 0; +} + +static int +nv10_graph_fini(struct nouveau_object *object, bool suspend) +{ + struct nv10_graph_priv *priv = (void *)object; + return nouveau_graph_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv10_graph_oclass = { + .handle = NV_ENGINE(GR, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_graph_ctor, + .dtor = nv10_graph_dtor, + .init = nv10_graph_init, + .fini = nv10_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c new file mode 100644 index 0000000..b245593 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c @@ -0,0 +1,384 @@ +#include <core/client.h> +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/handle.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/graph.h> +#include <engine/fifo.h> + +#include "nv20.h" +#include "regs.h" + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv20_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */ + { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */ + { 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */ + { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */ + { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */ + {}, +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv20_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_chan *chan; + int ret, i; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, + 0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC, + &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->chid = nouveau_fifo_chan(parent)->chid; + + nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24)); + nv_wo32(chan, 0x033c, 0xffff0000); + nv_wo32(chan, 0x03a0, 0x0fff0000); + nv_wo32(chan, 0x03a4, 0x0fff0000); + nv_wo32(chan, 0x047c, 0x00000101); + nv_wo32(chan, 0x0490, 0x00000111); + nv_wo32(chan, 0x04a8, 0x44400000); + for (i = 0x04d4; i <= 
0x04e0; i += 4) + nv_wo32(chan, i, 0x00030303); + for (i = 0x04f4; i <= 0x0500; i += 4) + nv_wo32(chan, i, 0x00080000); + for (i = 0x050c; i <= 0x0518; i += 4) + nv_wo32(chan, i, 0x01012000); + for (i = 0x051c; i <= 0x0528; i += 4) + nv_wo32(chan, i, 0x000105b8); + for (i = 0x052c; i <= 0x0538; i += 4) + nv_wo32(chan, i, 0x00080008); + for (i = 0x055c; i <= 0x0598; i += 4) + nv_wo32(chan, i, 0x07ff0000); + nv_wo32(chan, 0x05a4, 0x4b7fffff); + nv_wo32(chan, 0x05fc, 0x00000001); + nv_wo32(chan, 0x0604, 0x00004000); + nv_wo32(chan, 0x0610, 0x00000001); + nv_wo32(chan, 0x0618, 0x00040000); + nv_wo32(chan, 0x061c, 0x00010000); + for (i = 0x1c1c; i <= 0x248c; i += 16) { + nv_wo32(chan, (i + 0), 0x10700ff9); + nv_wo32(chan, (i + 4), 0x0436086c); + nv_wo32(chan, (i + 8), 0x000c001b); + } + nv_wo32(chan, 0x281c, 0x3f800000); + nv_wo32(chan, 0x2830, 0x3f800000); + nv_wo32(chan, 0x285c, 0x40000000); + nv_wo32(chan, 0x2860, 0x3f800000); + nv_wo32(chan, 0x2864, 0x3f000000); + nv_wo32(chan, 0x286c, 0x40000000); + nv_wo32(chan, 0x2870, 0x3f800000); + nv_wo32(chan, 0x2878, 0xbf800000); + nv_wo32(chan, 0x2880, 0xbf800000); + nv_wo32(chan, 0x34a4, 0x000fe000); + nv_wo32(chan, 0x3530, 0x000003f8); + nv_wo32(chan, 0x3540, 0x002fe000); + for (i = 0x355c; i <= 0x3578; i += 4) + nv_wo32(chan, i, 0x001c527c); + return 0; +} + +int +nv20_graph_context_init(struct nouveau_object *object) +{ + struct nv20_graph_priv *priv = (void *)object->engine; + struct nv20_graph_chan *chan = (void *)object; + int ret; + + ret = nouveau_graph_context_init(&chan->base); + if (ret) + return ret; + + nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4); + return 0; +} + +int +nv20_graph_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv20_graph_priv *priv = (void *)object->engine; + struct nv20_graph_chan *chan = (void *)object; + int chid = -1; + + nv_mask(priv, 0x400720, 0x00000001, 0x00000000); + if (nv_rd32(priv, 0x400144) & 0x00010000) + chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24; + if (chan->chid == chid) { + nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4); + nv_wr32(priv, 0x400788, 0x00000002); + nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); + nv_wr32(priv, 0x400144, 0x10000000); + nv_mask(priv, 0x400148, 0xff000000, 0x1f000000); + } + nv_mask(priv, 0x400720, 0x00000001, 0x00000001); + + nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000); + return nouveau_graph_context_fini(&chan->base, suspend); +} + +static struct nouveau_oclass +nv20_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x20), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv20_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = nv20_graph_context_init, + .fini = nv20_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +void +nv20_graph_tile_prog(struct nouveau_engine *engine, int i) +{ + struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i]; + struct nouveau_fifo *pfifo = nouveau_fifo(engine); + struct nv20_graph_priv *priv = (void *)engine; + unsigned long flags; + + pfifo->pause(pfifo, &flags); + nv04_graph_idle(priv); + + nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); + + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * 
i); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); + + if (nv_device(engine)->chipset != 0x34) { + nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); + } + + pfifo->start(pfifo, &flags); +} + +void +nv20_graph_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nouveau_handle *handle; + struct nv20_graph_priv *priv = (void *)subdev; + u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); + u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x01f00000) >> 20; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff; + u32 show = stat; + + engctx = nouveau_engctx_get(engine, chid); + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + handle = nouveau_handle_get_class(engctx, class); + if (handle && !nv_call(handle->object, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + nouveau_handle_put(handle); + } + } + + nv_wr32(priv, NV03_PGRAPH_INTR, stat); + nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); + + if (show) { + nv_error(priv, "%s", ""); + nouveau_bitfield_print(nv10_graph_intr_name, show); + pr_cont(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + pr_cont(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + pr_cont("\n"); + nv_error(priv, + "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, nouveau_client_name(engctx), subc, class, mthd, + data); + } + + nouveau_engctx_put(engctx); +} + +static int +nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv20_graph_intr; + nv_engine(priv)->cclass = &nv20_graph_cclass; + nv_engine(priv)->sclass = nv20_graph_sclass; + nv_engine(priv)->tile_prog = nv20_graph_tile_prog; + return 0; +} + +void +nv20_graph_dtor(struct nouveau_object *object) +{ + struct nv20_graph_priv *priv = (void *)object; + nouveau_gpuobj_ref(NULL, &priv->ctxtab); + nouveau_graph_destroy(&priv->base); +} + +int +nv20_graph_init(struct nouveau_object *object) +{ + struct nouveau_engine *engine = nv_engine(object); + struct nv20_graph_priv *priv = (void *)engine; + struct nouveau_fb *pfb = nouveau_fb(object); + u32 tmp, vramsz; + int ret, i; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4); + + if (nv_device(priv)->chipset == 0x20) { + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000); + for (i = 0; i < 15; i++) + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000); + 
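/* Annotation (not part of the original patch): the writes above appear to go
 * through the RDI window (NV10_PGRAPH_RDI_INDEX/DATA) to zero part of
 * PGRAPH's internal context RAM; the nv_wait() below then polls 0x400700
 * (the PGRAPH status register) until the engine reports idle before the
 * debug registers are programmed. */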
nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); + } else { + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000); + for (i = 0; i < 32; i++) + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000); + nv_wait(priv, 0x400700, 0xffffffff, 0x00000000); + } + + nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700); + nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ + nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000); + nv_wr32(priv, 0x40009C , 0x00000040); + + if (nv_device(priv)->chipset >= 0x25) { + nv_wr32(priv, 0x400890, 0x00a8cfff); + nv_wr32(priv, 0x400610, 0x304B1FB6); + nv_wr32(priv, 0x400B80, 0x1cbd3883); + nv_wr32(priv, 0x400B84, 0x44000000); + nv_wr32(priv, 0x400098, 0x40000080); + nv_wr32(priv, 0x400B88, 0x000000ff); + + } else { + nv_wr32(priv, 0x400880, 0x0008c7df); + nv_wr32(priv, 0x400094, 0x00000005); + nv_wr32(priv, 0x400B80, 0x45eae20e); + nv_wr32(priv, 0x400B84, 0x24000000); + nv_wr32(priv, 0x400098, 0x00000040); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030); + } + + /* Turn all the tiling regions off. */ + for (i = 0; i < pfb->tile.regions; i++) + engine->tile_prog(engine, i); + + nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324)); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324)); + + nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); + + tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00; + nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp); + tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100; + nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp); + + /* begin RAM config */ + vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1; + nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200)); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204)); + nv_wr32(priv, 0x400820, 0); + nv_wr32(priv, 0x400824, 0); + nv_wr32(priv, 0x400864, vramsz - 1); + nv_wr32(priv, 0x400868, vramsz - 1); + + /* interesting.. the below overwrites some of the tile setup above.. 
*/ + nv_wr32(priv, 0x400B20, 0x00000000); + nv_wr32(priv, 0x400B04, 0xFFFFFFFF); + + nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0); + nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0); + nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); + nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); + return 0; +} + +struct nouveau_oclass +nv20_graph_oclass = { + .handle = NV_ENGINE(GR, 0x20), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv20_graph_ctor, + .dtor = nv20_graph_dtor, + .init = nv20_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h new file mode 100644 index 0000000..2bea731 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h @@ -0,0 +1,31 @@ +#ifndef __NV20_GRAPH_H__ +#define __NV20_GRAPH_H__ + +#include <core/enum.h> + +#include <engine/graph.h> +#include <engine/fifo.h> + +struct nv20_graph_priv { + struct nouveau_graph base; + struct nouveau_gpuobj *ctxtab; +}; + +struct nv20_graph_chan { + struct nouveau_graph_chan base; + int chid; +}; + +extern struct nouveau_oclass nv25_graph_sclass[]; +int nv20_graph_context_init(struct nouveau_object *); +int nv20_graph_context_fini(struct nouveau_object *, bool); + +void nv20_graph_tile_prog(struct nouveau_engine *, int); +void nv20_graph_intr(struct nouveau_subdev *); + +void nv20_graph_dtor(struct nouveau_object *); +int nv20_graph_init(struct nouveau_object *); + +int nv30_graph_init(struct nouveau_object *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c new file mode 100644 index 0000000..7a80d00 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c @@ -0,0 +1,167 @@ +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/graph.h> + +#include "nv20.h" +#include "regs.h" + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +struct nouveau_oclass +nv25_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */ + { 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */ + { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */ + { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */ + { 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */ + {}, +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv25_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_chan *chan; + int ret, i; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724, + 16, NVOBJ_FLAG_ZERO_ALLOC, 
&chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->chid = nouveau_fifo_chan(parent)->chid; + + nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); + nv_wo32(chan, 0x035c, 0xffff0000); + nv_wo32(chan, 0x03c0, 0x0fff0000); + nv_wo32(chan, 0x03c4, 0x0fff0000); + nv_wo32(chan, 0x049c, 0x00000101); + nv_wo32(chan, 0x04b0, 0x00000111); + nv_wo32(chan, 0x04c8, 0x00000080); + nv_wo32(chan, 0x04cc, 0xffff0000); + nv_wo32(chan, 0x04d0, 0x00000001); + nv_wo32(chan, 0x04e4, 0x44400000); + nv_wo32(chan, 0x04fc, 0x4b800000); + for (i = 0x0510; i <= 0x051c; i += 4) + nv_wo32(chan, i, 0x00030303); + for (i = 0x0530; i <= 0x053c; i += 4) + nv_wo32(chan, i, 0x00080000); + for (i = 0x0548; i <= 0x0554; i += 4) + nv_wo32(chan, i, 0x01012000); + for (i = 0x0558; i <= 0x0564; i += 4) + nv_wo32(chan, i, 0x000105b8); + for (i = 0x0568; i <= 0x0574; i += 4) + nv_wo32(chan, i, 0x00080008); + for (i = 0x0598; i <= 0x05d4; i += 4) + nv_wo32(chan, i, 0x07ff0000); + nv_wo32(chan, 0x05e0, 0x4b7fffff); + nv_wo32(chan, 0x0620, 0x00000080); + nv_wo32(chan, 0x0624, 0x30201000); + nv_wo32(chan, 0x0628, 0x70605040); + nv_wo32(chan, 0x062c, 0xb0a09080); + nv_wo32(chan, 0x0630, 0xf0e0d0c0); + nv_wo32(chan, 0x0664, 0x00000001); + nv_wo32(chan, 0x066c, 0x00004000); + nv_wo32(chan, 0x0678, 0x00000001); + nv_wo32(chan, 0x0680, 0x00040000); + nv_wo32(chan, 0x0684, 0x00010000); + for (i = 0x1b04; i <= 0x2374; i += 16) { + nv_wo32(chan, (i + 0), 0x10700ff9); + nv_wo32(chan, (i + 4), 0x0436086c); + nv_wo32(chan, (i + 8), 0x000c001b); + } + nv_wo32(chan, 0x2704, 0x3f800000); + nv_wo32(chan, 0x2718, 0x3f800000); + nv_wo32(chan, 0x2744, 0x40000000); + nv_wo32(chan, 0x2748, 0x3f800000); + nv_wo32(chan, 0x274c, 0x3f000000); + nv_wo32(chan, 0x2754, 0x40000000); + nv_wo32(chan, 0x2758, 0x3f800000); + nv_wo32(chan, 0x2760, 0xbf800000); + nv_wo32(chan, 0x2768, 0xbf800000); + nv_wo32(chan, 0x308c, 0x000fe000); + nv_wo32(chan, 0x3108, 0x000003f8); + nv_wo32(chan, 0x3468, 0x002fe000); + for (i = 0x3484; i <= 0x34a0; i += 4) + nv_wo32(chan, i, 0x001c527c); + return 0; +} + +static struct nouveau_oclass +nv25_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x25), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv25_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = nv20_graph_context_init, + .fini = nv20_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static int +nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv20_graph_intr; + nv_engine(priv)->cclass = &nv25_graph_cclass; + nv_engine(priv)->sclass = nv25_graph_sclass; + nv_engine(priv)->tile_prog = nv20_graph_tile_prog; + return 0; +} + +struct nouveau_oclass +nv25_graph_oclass = { + .handle = NV_ENGINE(GR, 0x25), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv25_graph_ctor, + .dtor = nv20_graph_dtor, + .init = 
nv20_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c new file mode 100644 index 0000000..3e1f32e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c @@ -0,0 +1,134 @@ +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/graph.h> + +#include "nv20.h" +#include "regs.h" + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv2a_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_chan *chan; + int ret, i; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0, + 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->chid = nouveau_fifo_chan(parent)->chid; + + nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24)); + nv_wo32(chan, 0x033c, 0xffff0000); + nv_wo32(chan, 0x03a0, 0x0fff0000); + nv_wo32(chan, 0x03a4, 0x0fff0000); + nv_wo32(chan, 0x047c, 0x00000101); + nv_wo32(chan, 0x0490, 0x00000111); + nv_wo32(chan, 0x04a8, 0x44400000); + for (i = 0x04d4; i <= 0x04e0; i += 4) + nv_wo32(chan, i, 0x00030303); + for (i = 0x04f4; i <= 0x0500; i += 4) + nv_wo32(chan, i, 0x00080000); + for (i = 0x050c; i <= 0x0518; i += 4) + nv_wo32(chan, i, 0x01012000); + for (i = 0x051c; i <= 0x0528; i += 4) + nv_wo32(chan, i, 0x000105b8); + for (i = 0x052c; i <= 0x0538; i += 4) + nv_wo32(chan, i, 0x00080008); + for (i = 0x055c; i <= 0x0598; i += 4) + nv_wo32(chan, i, 0x07ff0000); + nv_wo32(chan, 0x05a4, 0x4b7fffff); + nv_wo32(chan, 0x05fc, 0x00000001); + nv_wo32(chan, 0x0604, 0x00004000); + nv_wo32(chan, 0x0610, 0x00000001); + nv_wo32(chan, 0x0618, 0x00040000); + nv_wo32(chan, 0x061c, 0x00010000); + for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! 
*/ + nv_wo32(chan, (i + 0), 0x10700ff9); + nv_wo32(chan, (i + 4), 0x0436086c); + nv_wo32(chan, (i + 8), 0x000c001b); + } + nv_wo32(chan, 0x269c, 0x3f800000); + nv_wo32(chan, 0x26b0, 0x3f800000); + nv_wo32(chan, 0x26dc, 0x40000000); + nv_wo32(chan, 0x26e0, 0x3f800000); + nv_wo32(chan, 0x26e4, 0x3f000000); + nv_wo32(chan, 0x26ec, 0x40000000); + nv_wo32(chan, 0x26f0, 0x3f800000); + nv_wo32(chan, 0x26f8, 0xbf800000); + nv_wo32(chan, 0x2700, 0xbf800000); + nv_wo32(chan, 0x3024, 0x000fe000); + nv_wo32(chan, 0x30a0, 0x000003f8); + nv_wo32(chan, 0x33fc, 0x002fe000); + for (i = 0x341c; i <= 0x3438; i += 4) + nv_wo32(chan, i, 0x001c527c); + return 0; +} + +static struct nouveau_oclass +nv2a_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x2a), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv2a_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = nv20_graph_context_init, + .fini = nv20_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static int +nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv20_graph_intr; + nv_engine(priv)->cclass = &nv2a_graph_cclass; + nv_engine(priv)->sclass = nv25_graph_sclass; + nv_engine(priv)->tile_prog = nv20_graph_tile_prog; + return 0; +} + +struct nouveau_oclass +nv2a_graph_oclass = { + .handle = NV_ENGINE(GR, 0x2a), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv2a_graph_ctor, + .dtor = nv20_graph_dtor, + .init = nv20_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c new file mode 100644 index 0000000..e451db3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c @@ -0,0 +1,238 @@ +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/graph.h> + +#include "nv20.h" +#include "regs.h" + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv30_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */ + { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */ + { 0x0362, &nv04_graph_ofuncs, NULL }, 
/* surf2d (nv30) */ + { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */ + { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */ + { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */ + { 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */ + {}, +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv30_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_chan *chan; + int ret, i; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48, + 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->chid = nouveau_fifo_chan(parent)->chid; + + nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); + nv_wo32(chan, 0x0410, 0x00000101); + nv_wo32(chan, 0x0424, 0x00000111); + nv_wo32(chan, 0x0428, 0x00000060); + nv_wo32(chan, 0x0444, 0x00000080); + nv_wo32(chan, 0x0448, 0xffff0000); + nv_wo32(chan, 0x044c, 0x00000001); + nv_wo32(chan, 0x0460, 0x44400000); + nv_wo32(chan, 0x048c, 0xffff0000); + for (i = 0x04e0; i < 0x04e8; i += 4) + nv_wo32(chan, i, 0x0fff0000); + nv_wo32(chan, 0x04ec, 0x00011100); + for (i = 0x0508; i < 0x0548; i += 4) + nv_wo32(chan, i, 0x07ff0000); + nv_wo32(chan, 0x0550, 0x4b7fffff); + nv_wo32(chan, 0x058c, 0x00000080); + nv_wo32(chan, 0x0590, 0x30201000); + nv_wo32(chan, 0x0594, 0x70605040); + nv_wo32(chan, 0x0598, 0xb8a89888); + nv_wo32(chan, 0x059c, 0xf8e8d8c8); + nv_wo32(chan, 0x05b0, 0xb0000000); + for (i = 0x0600; i < 0x0640; i += 4) + nv_wo32(chan, i, 0x00010588); + for (i = 0x0640; i < 0x0680; i += 4) + nv_wo32(chan, i, 0x00030303); + for (i = 0x06c0; i < 0x0700; i += 4) + nv_wo32(chan, i, 0x0008aae4); + for (i = 0x0700; i < 0x0740; i += 4) + nv_wo32(chan, i, 0x01012000); + for (i = 0x0740; i < 0x0780; i += 4) + nv_wo32(chan, i, 0x00080008); + nv_wo32(chan, 0x085c, 0x00040000); + nv_wo32(chan, 0x0860, 0x00010000); + for (i = 0x0864; i < 0x0874; i += 4) + nv_wo32(chan, i, 0x00040004); + for (i = 0x1f18; i <= 0x3088 ; i += 16) { + nv_wo32(chan, i + 0, 0x10700ff9); + nv_wo32(chan, i + 1, 0x0436086c); + nv_wo32(chan, i + 2, 0x000c001b); + } + for (i = 0x30b8; i < 0x30c8; i += 4) + nv_wo32(chan, i, 0x0000ffff); + nv_wo32(chan, 0x344c, 0x3f800000); + nv_wo32(chan, 0x3808, 0x3f800000); + nv_wo32(chan, 0x381c, 0x3f800000); + nv_wo32(chan, 0x3848, 0x40000000); + nv_wo32(chan, 0x384c, 0x3f800000); + nv_wo32(chan, 0x3850, 0x3f000000); + nv_wo32(chan, 0x3858, 0x40000000); + nv_wo32(chan, 0x385c, 0x3f800000); + nv_wo32(chan, 0x3864, 0xbf800000); + nv_wo32(chan, 0x386c, 0xbf800000); + return 0; +} + +static struct nouveau_oclass +nv30_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x30), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv30_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = nv20_graph_context_init, + .fini = nv20_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static int +nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) 
+{ + struct nv20_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv20_graph_intr; + nv_engine(priv)->cclass = &nv30_graph_cclass; + nv_engine(priv)->sclass = nv30_graph_sclass; + nv_engine(priv)->tile_prog = nv20_graph_tile_prog; + return 0; +} + +int +nv30_graph_init(struct nouveau_object *object) +{ + struct nouveau_engine *engine = nv_engine(object); + struct nv20_graph_priv *priv = (void *)engine; + struct nouveau_fb *pfb = nouveau_fb(object); + int ret, i; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4); + + nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0); + nv_wr32(priv, 0x400890, 0x01b463ff); + nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475); + nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000); + nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); + nv_wr32(priv, 0x400B80, 0x1003d888); + nv_wr32(priv, 0x400B84, 0x0c000000); + nv_wr32(priv, 0x400098, 0x00000000); + nv_wr32(priv, 0x40009C, 0x0005ad00); + nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */ + nv_wr32(priv, 0x4000a0, 0x00000000); + nv_wr32(priv, 0x4000a4, 0x00000008); + nv_wr32(priv, 0x4008a8, 0xb784a400); + nv_wr32(priv, 0x400ba0, 0x002f8685); + nv_wr32(priv, 0x400ba4, 0x00231f3f); + nv_wr32(priv, 0x4008a4, 0x40000020); + + if (nv_device(priv)->chipset == 0x34) { + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032); + nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004); + nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002); + } + + nv_wr32(priv, 0x4000c0, 0x00000016); + + /* Turn all the tiling regions off. 
*/ + for (i = 0; i < pfb->tile.regions; i++) + engine->tile_prog(engine, i); + + nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); + nv_wr32(priv, 0x0040075c , 0x00000001); + + /* begin RAM config */ + /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */ + nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); + if (nv_device(priv)->chipset != 0x34) { + nv_wr32(priv, 0x400750, 0x00EA0000); + nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x400750, 0x00EA0004); + nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204)); + } + return 0; +} + +struct nouveau_oclass +nv30_graph_oclass = { + .handle = NV_ENGINE(GR, 0x30), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv30_graph_ctor, + .dtor = nv20_graph_dtor, + .init = nv30_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c new file mode 100644 index 0000000..9385ac7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c @@ -0,0 +1,168 @@ +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include <engine/graph.h> + +#include "nv20.h" +#include "regs.h" + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv34_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */ + { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */ + { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */ + { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */ + { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */ + { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */ + { 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */ + {}, +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv34_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_chan *chan; + int ret, i; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc, + 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->chid = nouveau_fifo_chan(parent)->chid; + + nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); + nv_wo32(chan, 0x040c, 0x01000101); + nv_wo32(chan, 0x0420, 0x00000111); + nv_wo32(chan, 0x0424, 0x00000060); + nv_wo32(chan, 0x0440, 0x00000080); + nv_wo32(chan, 0x0444, 0xffff0000); + nv_wo32(chan, 0x0448, 0x00000001); + nv_wo32(chan, 0x045c, 0x44400000); + nv_wo32(chan, 0x0480, 0xffff0000); + for (i = 0x04d4; i < 
0x04dc; i += 4) + nv_wo32(chan, i, 0x0fff0000); + nv_wo32(chan, 0x04e0, 0x00011100); + for (i = 0x04fc; i < 0x053c; i += 4) + nv_wo32(chan, i, 0x07ff0000); + nv_wo32(chan, 0x0544, 0x4b7fffff); + nv_wo32(chan, 0x057c, 0x00000080); + nv_wo32(chan, 0x0580, 0x30201000); + nv_wo32(chan, 0x0584, 0x70605040); + nv_wo32(chan, 0x0588, 0xb8a89888); + nv_wo32(chan, 0x058c, 0xf8e8d8c8); + nv_wo32(chan, 0x05a0, 0xb0000000); + for (i = 0x05f0; i < 0x0630; i += 4) + nv_wo32(chan, i, 0x00010588); + for (i = 0x0630; i < 0x0670; i += 4) + nv_wo32(chan, i, 0x00030303); + for (i = 0x06b0; i < 0x06f0; i += 4) + nv_wo32(chan, i, 0x0008aae4); + for (i = 0x06f0; i < 0x0730; i += 4) + nv_wo32(chan, i, 0x01012000); + for (i = 0x0730; i < 0x0770; i += 4) + nv_wo32(chan, i, 0x00080008); + nv_wo32(chan, 0x0850, 0x00040000); + nv_wo32(chan, 0x0854, 0x00010000); + for (i = 0x0858; i < 0x0868; i += 4) + nv_wo32(chan, i, 0x00040004); + for (i = 0x15ac; i <= 0x271c ; i += 16) { + nv_wo32(chan, i + 0, 0x10700ff9); + nv_wo32(chan, i + 1, 0x0436086c); + nv_wo32(chan, i + 2, 0x000c001b); + } + for (i = 0x274c; i < 0x275c; i += 4) + nv_wo32(chan, i, 0x0000ffff); + nv_wo32(chan, 0x2ae0, 0x3f800000); + nv_wo32(chan, 0x2e9c, 0x3f800000); + nv_wo32(chan, 0x2eb0, 0x3f800000); + nv_wo32(chan, 0x2edc, 0x40000000); + nv_wo32(chan, 0x2ee0, 0x3f800000); + nv_wo32(chan, 0x2ee4, 0x3f000000); + nv_wo32(chan, 0x2eec, 0x40000000); + nv_wo32(chan, 0x2ef0, 0x3f800000); + nv_wo32(chan, 0x2ef8, 0xbf800000); + nv_wo32(chan, 0x2f00, 0xbf800000); + return 0; +} + +static struct nouveau_oclass +nv34_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x34), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv34_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = nv20_graph_context_init, + .fini = nv20_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static int +nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv20_graph_intr; + nv_engine(priv)->cclass = &nv34_graph_cclass; + nv_engine(priv)->sclass = nv34_graph_sclass; + nv_engine(priv)->tile_prog = nv20_graph_tile_prog; + return 0; +} + +struct nouveau_oclass +nv34_graph_oclass = { + .handle = NV_ENGINE(GR, 0x34), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv34_graph_ctor, + .dtor = nv20_graph_dtor, + .init = nv30_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c new file mode 100644 index 0000000..9ce84b7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c @@ -0,0 +1,166 @@ +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/enum.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> + +#include "nv20.h" +#include "regs.h" + 
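/* Annotation (not part of the original patch): nv35.c follows the same
 * pattern as the nv30/nv34 variants above -- only the 3D class (0x0497,
 * "rankine"), the size of the per-channel context image (0x577c here) and
 * the offsets written into that image differ; context init/fini are reused
 * from nv20.c via nv20_graph_context_init()/_fini(). */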
+/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv35_graph_sclass[] = { + { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */ + { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */ + { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */ + { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */ + { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */ + { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */ + { 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */ + {}, +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv35_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_chan *chan; + int ret, i; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c, + 16, NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->chid = nouveau_fifo_chan(parent)->chid; + + nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24)); + nv_wo32(chan, 0x040c, 0x00000101); + nv_wo32(chan, 0x0420, 0x00000111); + nv_wo32(chan, 0x0424, 0x00000060); + nv_wo32(chan, 0x0440, 0x00000080); + nv_wo32(chan, 0x0444, 0xffff0000); + nv_wo32(chan, 0x0448, 0x00000001); + nv_wo32(chan, 0x045c, 0x44400000); + nv_wo32(chan, 0x0488, 0xffff0000); + for (i = 0x04dc; i < 0x04e4; i += 4) + nv_wo32(chan, i, 0x0fff0000); + nv_wo32(chan, 0x04e8, 0x00011100); + for (i = 0x0504; i < 0x0544; i += 4) + nv_wo32(chan, i, 0x07ff0000); + nv_wo32(chan, 0x054c, 0x4b7fffff); + nv_wo32(chan, 0x0588, 0x00000080); + nv_wo32(chan, 0x058c, 0x30201000); + nv_wo32(chan, 0x0590, 0x70605040); + nv_wo32(chan, 0x0594, 0xb8a89888); + nv_wo32(chan, 0x0598, 0xf8e8d8c8); + nv_wo32(chan, 0x05ac, 0xb0000000); + for (i = 0x0604; i < 0x0644; i += 4) + nv_wo32(chan, i, 0x00010588); + for (i = 0x0644; i < 0x0684; i += 4) + nv_wo32(chan, i, 0x00030303); + for (i = 0x06c4; i < 0x0704; i += 4) + nv_wo32(chan, i, 0x0008aae4); + for (i = 0x0704; i < 0x0744; i += 4) + nv_wo32(chan, i, 0x01012000); + for (i = 0x0744; i < 0x0784; i += 4) + nv_wo32(chan, i, 0x00080008); + nv_wo32(chan, 0x0860, 0x00040000); + nv_wo32(chan, 0x0864, 0x00010000); + for (i = 0x0868; i < 0x0878; i += 4) + nv_wo32(chan, i, 0x00040004); + for (i = 0x1f1c; i <= 0x308c ; i += 16) { + nv_wo32(chan, i + 0, 0x10700ff9); + nv_wo32(chan, i + 4, 0x0436086c); + nv_wo32(chan, i + 8, 0x000c001b); + } + for (i = 0x30bc; i < 0x30cc; i += 4) + nv_wo32(chan, i, 0x0000ffff); + nv_wo32(chan, 0x3450, 0x3f800000); + nv_wo32(chan, 0x380c, 0x3f800000); + nv_wo32(chan, 0x3820, 0x3f800000); + nv_wo32(chan, 0x384c, 0x40000000); + nv_wo32(chan, 0x3850, 0x3f800000); + nv_wo32(chan, 0x3854, 
0x3f000000); + nv_wo32(chan, 0x385c, 0x40000000); + nv_wo32(chan, 0x3860, 0x3f800000); + nv_wo32(chan, 0x3868, 0xbf800000); + nv_wo32(chan, 0x3870, 0xbf800000); + return 0; +} + +static struct nouveau_oclass +nv35_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x35), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv35_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = nv20_graph_context_init, + .fini = nv20_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static int +nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv20_graph_intr; + nv_engine(priv)->cclass = &nv35_graph_cclass; + nv_engine(priv)->sclass = nv35_graph_sclass; + nv_engine(priv)->tile_prog = nv20_graph_tile_prog; + return 0; +} + +struct nouveau_oclass +nv35_graph_oclass = { + .handle = NV_ENGINE(GR, 0x35), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv35_graph_ctor, + .dtor = nv20_graph_dtor, + .init = nv30_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c new file mode 100644 index 0000000..193a5de --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c @@ -0,0 +1,537 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/os.h> +#include <core/class.h> +#include <core/handle.h> +#include <core/engctx.h> + +#include <subdev/fb.h> +#include <subdev/timer.h> + +#include <engine/graph.h> +#include <engine/fifo.h> + +#include "nv40.h" +#include "regs.h" + +struct nv40_graph_priv { + struct nouveau_graph base; + u32 size; +}; + +struct nv40_graph_chan { + struct nouveau_graph_chan base; +}; + +static u64 +nv40_graph_units(struct nouveau_graph *graph) +{ + struct nv40_graph_priv *priv = (void *)graph; + + return nv_rd32(priv, 0x1540); +} + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static int +nv40_graph_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj *obj; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, + 20, 16, 0, &obj); + *pobject = nv_object(obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, nv_mclass(obj)); + nv_wo32(obj, 0x04, 0x00000000); + nv_wo32(obj, 0x08, 0x00000000); +#ifdef __BIG_ENDIAN + nv_mo32(obj, 0x08, 0x01000000, 0x01000000); +#endif + nv_wo32(obj, 0x0c, 0x00000000); + nv_wo32(obj, 0x10, 0x00000000); + return 0; +} + +static struct nouveau_ofuncs +nv40_graph_ofuncs = { + .ctor = nv40_graph_object_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, +}; + +static struct nouveau_oclass +nv40_graph_sclass[] = { + { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */ + { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */ + { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */ + { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */ + { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */ + { 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */ + {}, +}; + +static struct nouveau_oclass +nv44_graph_sclass[] = { + { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */ + { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */ + { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */ + { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */ + { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */ + { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */ + { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */ + { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */ + { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */ + { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */ + { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */ + { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */ + { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */ + { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */ + { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */ + { 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */ + {}, +}; + 
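/* Annotation (not part of the original patch): the two object-class tables
 * above are identical except for the 3D class -- 0x4097 ("curie") for the
 * original NV40 family versus 0x4497 for the NV44 family.  The engine
 * constructor further down selects between them with nv44_graph_class(),
 * a helper presumably defined in nv40.h (not shown in this hunk). */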
+/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv40_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_graph_priv *priv = (void *)engine; + struct nv40_graph_chan *chan; + int ret; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, + priv->size, 16, + NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan)); + nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4); + return 0; +} + +static int +nv40_graph_context_fini(struct nouveau_object *object, bool suspend) +{ + struct nv40_graph_priv *priv = (void *)object->engine; + struct nv40_graph_chan *chan = (void *)object; + u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4; + int ret = 0; + + nv_mask(priv, 0x400720, 0x00000001, 0x00000000); + + if (nv_rd32(priv, 0x40032c) == inst) { + if (suspend) { + nv_wr32(priv, 0x400720, 0x00000000); + nv_wr32(priv, 0x400784, inst); + nv_mask(priv, 0x400310, 0x00000020, 0x00000020); + nv_mask(priv, 0x400304, 0x00000001, 0x00000001); + if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) { + u32 insn = nv_rd32(priv, 0x400308); + nv_warn(priv, "ctxprog timeout 0x%08x\n", insn); + ret = -EBUSY; + } + } + + nv_mask(priv, 0x40032c, 0x01000000, 0x00000000); + } + + if (nv_rd32(priv, 0x400330) == inst) + nv_mask(priv, 0x400330, 0x01000000, 0x00000000); + + nv_mask(priv, 0x400720, 0x00000001, 0x00000001); + return ret; +} + +static struct nouveau_oclass +nv40_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = _nouveau_graph_context_init, + .fini = nv40_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static void +nv40_graph_tile_prog(struct nouveau_engine *engine, int i) +{ + struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i]; + struct nouveau_fifo *pfifo = nouveau_fifo(engine); + struct nv40_graph_priv *priv = (void *)engine; + unsigned long flags; + + pfifo->pause(pfifo, &flags); + nv04_graph_idle(priv); + + switch (nv_device(priv)->chipset) { + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x45: + case 0x4e: + nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); + nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); + switch (nv_device(priv)->chipset) { + case 0x40: + case 0x45: + nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); + nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp); + break; + case 0x41: + case 0x42: + case 0x43: + nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp); + nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp); + break; + default: + break; + } + break; + case 0x44: + case 0x4a: + nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(priv, 
NV20_PGRAPH_TILE(i), tile->addr); + break; + case 0x46: + case 0x4c: + case 0x47: + case 0x49: + case 0x4b: + case 0x63: + case 0x67: + case 0x68: + nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr); + nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); + switch (nv_device(priv)->chipset) { + case 0x47: + case 0x49: + case 0x4b: + nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp); + nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp); + break; + default: + break; + } + break; + default: + break; + } + + pfifo->start(pfifo, &flags); +} + +static void +nv40_graph_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nouveau_handle *handle = NULL; + struct nv40_graph_priv *priv = (void *)subdev; + u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR); + u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS); + u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff; + u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff; + u32 show = stat; + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + handle = nouveau_handle_get_class(engctx, class); + if (handle && !nv_call(handle->object, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + nouveau_handle_put(handle); + } + + if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { + nv_mask(priv, 0x402000, 0, 0); + } + } + + nv_wr32(priv, NV03_PGRAPH_INTR, stat); + nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); + + if (show) { + nv_error(priv, "%s", ""); + nouveau_bitfield_print(nv10_graph_intr_name, show); + pr_cont(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + pr_cont(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + pr_cont("\n"); + nv_error(priv, + "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst << 4, nouveau_client_name(engctx), subc, + class, mthd, data); + } + + nouveau_engctx_put(engctx); +} + +static int +nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00001000; + nv_subdev(priv)->intr = nv40_graph_intr; + nv_engine(priv)->cclass = &nv40_graph_cclass; + if (nv44_graph_class(priv)) + nv_engine(priv)->sclass = nv44_graph_sclass; + else + nv_engine(priv)->sclass = nv40_graph_sclass; + nv_engine(priv)->tile_prog = nv40_graph_tile_prog; + + priv->base.units = nv40_graph_units; + return 0; +} + +static int +nv40_graph_init(struct nouveau_object *object) +{ + struct nouveau_engine *engine = nv_engine(object); + struct nouveau_fb *pfb = nouveau_fb(object); + struct nv40_graph_priv *priv = (void *)engine; + int ret, i, j; + u32 vramsz; + + ret = nouveau_graph_init(&priv->base); + if (ret) + 
return ret; + + /* generate and upload context program */ + ret = nv40_grctx_init(nv_device(priv), &priv->size); + if (ret) + return ret; + + /* No context present currently */ + nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); + + nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); + + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); + nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000); + nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0); + nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055); + nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000); + nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); + + nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100); + nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF); + + j = nv_rd32(priv, 0x1540) & 0xff; + if (j) { + for (i = 0; !(j & 1); j >>= 1, i++) + ; + nv_wr32(priv, 0x405000, i); + } + + if (nv_device(priv)->chipset == 0x40) { + nv_wr32(priv, 0x4009b0, 0x83280fff); + nv_wr32(priv, 0x4009b4, 0x000000a0); + } else { + nv_wr32(priv, 0x400820, 0x83280eff); + nv_wr32(priv, 0x400824, 0x000000a0); + } + + switch (nv_device(priv)->chipset) { + case 0x40: + case 0x45: + nv_wr32(priv, 0x4009b8, 0x0078e366); + nv_wr32(priv, 0x4009bc, 0x0000014c); + break; + case 0x41: + case 0x42: /* pciid also 0x00Cx */ + /* case 0x0120: XXX (pciid) */ + nv_wr32(priv, 0x400828, 0x007596ff); + nv_wr32(priv, 0x40082c, 0x00000108); + break; + case 0x43: + nv_wr32(priv, 0x400828, 0x0072cb77); + nv_wr32(priv, 0x40082c, 0x00000108); + break; + case 0x44: + case 0x46: /* G72 */ + case 0x4a: + case 0x4c: /* G7x-based C51 */ + case 0x4e: + nv_wr32(priv, 0x400860, 0); + nv_wr32(priv, 0x400864, 0); + break; + case 0x47: /* G70 */ + case 0x49: /* G71 */ + case 0x4b: /* G73 */ + nv_wr32(priv, 0x400828, 0x07830610); + nv_wr32(priv, 0x40082c, 0x0000016A); + break; + default: + break; + } + + nv_wr32(priv, 0x400b38, 0x2ffff800); + nv_wr32(priv, 0x400b3c, 0x00006000); + + /* Tiling related stuff. */ + switch (nv_device(priv)->chipset) { + case 0x44: + case 0x4a: + nv_wr32(priv, 0x400bc4, 0x1003d888); + nv_wr32(priv, 0x400bbc, 0xb7a7b500); + break; + case 0x46: + nv_wr32(priv, 0x400bc4, 0x0000e024); + nv_wr32(priv, 0x400bbc, 0xb7a7b520); + break; + case 0x4c: + case 0x4e: + case 0x67: + nv_wr32(priv, 0x400bc4, 0x1003d888); + nv_wr32(priv, 0x400bbc, 0xb7a7b540); + break; + default: + break; + } + + /* Turn all the tiling regions off. 
*/ + for (i = 0; i < pfb->tile.regions; i++) + engine->tile_prog(engine, i); + + /* begin RAM config */ + vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1; + switch (nv_device(priv)->chipset) { + case 0x40: + nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204)); + nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204)); + nv_wr32(priv, 0x400820, 0); + nv_wr32(priv, 0x400824, 0); + nv_wr32(priv, 0x400864, vramsz); + nv_wr32(priv, 0x400868, vramsz); + break; + default: + switch (nv_device(priv)->chipset) { + case 0x41: + case 0x42: + case 0x43: + case 0x45: + case 0x4e: + case 0x44: + case 0x4a: + nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204)); + break; + default: + nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204)); + break; + } + nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200)); + nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204)); + nv_wr32(priv, 0x400840, 0); + nv_wr32(priv, 0x400844, 0); + nv_wr32(priv, 0x4008A0, vramsz); + nv_wr32(priv, 0x4008A4, vramsz); + break; + } + + return 0; +} + +struct nouveau_oclass +nv40_graph_oclass = { + .handle = NV_ENGINE(GR, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_graph_ctor, + .dtor = _nouveau_graph_dtor, + .init = nv40_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h new file mode 100644 index 0000000..7da35a4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h @@ -0,0 +1,21 @@ +#ifndef __NV40_GRAPH_H__ +#define __NV40_GRAPH_H__ + +/* returns 1 if device is one of the nv4x using the 0x4497 object class, + * helpful to determine a number of other hardware features + */ +static inline int +nv44_graph_class(void *priv) +{ + struct nouveau_device *device = nv_device(priv); + + if ((device->chipset & 0xf0) == 0x60) + return 1; + + return !(0x0baf & (1 << (device->chipset & 0x0f))); +} + +int nv40_grctx_init(struct nouveau_device *, u32 *size); +void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c new file mode 100644 index 0000000..1ac3611 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c @@ -0,0 +1,966 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/client.h> +#include <core/handle.h> +#include <core/engctx.h> +#include <core/enum.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/timer.h> + +#include <engine/fifo.h> +#include <engine/graph.h> + +#include "nv50.h" + +struct nv50_graph_priv { + struct nouveau_graph base; + spinlock_t lock; + u32 size; +}; + +struct nv50_graph_chan { + struct nouveau_graph_chan base; +}; + +static u64 +nv50_graph_units(struct nouveau_graph *graph) +{ + struct nv50_graph_priv *priv = (void *)graph; + + return nv_rd32(priv, 0x1540); +} + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static int +nv50_graph_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj *obj; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, + 16, 16, 0, &obj); + *pobject = nv_object(obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, nv_mclass(obj)); + nv_wo32(obj, 0x04, 0x00000000); + nv_wo32(obj, 0x08, 0x00000000); + nv_wo32(obj, 0x0c, 0x00000000); + return 0; +} + +static struct nouveau_ofuncs +nv50_graph_ofuncs = { + .ctor = nv50_graph_object_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, +}; + +static struct nouveau_oclass +nv50_graph_sclass[] = { + { 0x0030, &nv50_graph_ofuncs }, + { 0x502d, &nv50_graph_ofuncs }, + { 0x5039, &nv50_graph_ofuncs }, + { 0x5097, &nv50_graph_ofuncs }, + { 0x50c0, &nv50_graph_ofuncs }, + {} +}; + +static struct nouveau_oclass +nv84_graph_sclass[] = { + { 0x0030, &nv50_graph_ofuncs }, + { 0x502d, &nv50_graph_ofuncs }, + { 0x5039, &nv50_graph_ofuncs }, + { 0x50c0, &nv50_graph_ofuncs }, + { 0x8297, &nv50_graph_ofuncs }, + {} +}; + +static struct nouveau_oclass +nva0_graph_sclass[] = { + { 0x0030, &nv50_graph_ofuncs }, + { 0x502d, &nv50_graph_ofuncs }, + { 0x5039, &nv50_graph_ofuncs }, + { 0x50c0, &nv50_graph_ofuncs }, + { 0x8397, &nv50_graph_ofuncs }, + {} +}; + +static struct nouveau_oclass +nva3_graph_sclass[] = { + { 0x0030, &nv50_graph_ofuncs }, + { 0x502d, &nv50_graph_ofuncs }, + { 0x5039, &nv50_graph_ofuncs }, + { 0x50c0, &nv50_graph_ofuncs }, + { 0x8597, &nv50_graph_ofuncs }, + { 0x85c0, &nv50_graph_ofuncs }, + {} +}; + +static struct nouveau_oclass +nvaf_graph_sclass[] = { + { 0x0030, &nv50_graph_ofuncs }, + { 0x502d, &nv50_graph_ofuncs }, + { 0x5039, &nv50_graph_ofuncs }, + { 0x50c0, &nv50_graph_ofuncs }, + { 0x85c0, &nv50_graph_ofuncs }, + { 0x8697, &nv50_graph_ofuncs }, + {} +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static int +nv50_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct 
nv50_graph_priv *priv = (void *)engine; + struct nv50_graph_chan *chan; + int ret; + + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, + priv->size, 0, + NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan)); + return 0; +} + +static struct nouveau_oclass +nv50_graph_cclass = { + .handle = NV_ENGCTX(GR, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_graph_context_ctor, + .dtor = _nouveau_graph_context_dtor, + .init = _nouveau_graph_context_init, + .fini = _nouveau_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static int +nv50_graph_tlb_flush(struct nouveau_engine *engine) +{ + nv50_vm_flush_engine(&engine->base, 0x00); + return 0; +} + +static const struct nouveau_bitfield nv50_pgraph_status[] = { + { 0x00000001, "BUSY" }, /* set when any bit is set */ + { 0x00000002, "DISPATCH" }, + { 0x00000004, "UNK2" }, + { 0x00000008, "UNK3" }, + { 0x00000010, "UNK4" }, + { 0x00000020, "UNK5" }, + { 0x00000040, "M2MF" }, + { 0x00000080, "UNK7" }, + { 0x00000100, "CTXPROG" }, + { 0x00000200, "VFETCH" }, + { 0x00000400, "CCACHE_UNK4" }, + { 0x00000800, "STRMOUT_GSCHED_UNK5" }, + { 0x00001000, "UNK14XX" }, + { 0x00002000, "UNK24XX_CSCHED" }, + { 0x00004000, "UNK1CXX" }, + { 0x00008000, "CLIPID" }, + { 0x00010000, "ZCULL" }, + { 0x00020000, "ENG2D" }, + { 0x00040000, "UNK34XX" }, + { 0x00080000, "TPRAST" }, + { 0x00100000, "TPROP" }, + { 0x00200000, "TEX" }, + { 0x00400000, "TPVP" }, + { 0x00800000, "MP" }, + { 0x01000000, "ROP" }, + {} +}; + +static const char *const nv50_pgraph_vstatus_0[] = { + "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL +}; + +static const char *const nv50_pgraph_vstatus_1[] = { + "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL +}; + +static const char *const nv50_pgraph_vstatus_2[] = { + "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX", + "ROP", NULL +}; + +static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r, + const char *const units[], u32 status) +{ + int i; + + nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status); + + for (i = 0; units[i] && status; i++) { + if ((status & 7) == 1) + pr_cont(" %s", units[i]); + status >>= 3; + } + if (status) + pr_cont(" (invalid: 0x%x)", status); + pr_cont("\n"); +} + +static int +nv84_graph_tlb_flush(struct nouveau_engine *engine) +{ + struct nouveau_timer *ptimer = nouveau_timer(engine); + struct nv50_graph_priv *priv = (void *)engine; + bool idle, timeout = false; + unsigned long flags; + u64 start; + u32 tmp; + + spin_lock_irqsave(&priv->lock, flags); + nv_mask(priv, 0x400500, 0x00000001, 0x00000000); + + start = ptimer->read(ptimer); + do { + idle = true; + + for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) { + if ((tmp & 7) == 1) + idle = false; + } + + for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) { + if ((tmp & 7) == 1) + idle = false; + } + + for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) { + if ((tmp & 7) == 1) + idle = false; + } + } while (!idle && + !(timeout = ptimer->read(ptimer) - start > 2000000000)); + + if (timeout) { + nv_error(priv, "PGRAPH TLB flush idle timeout fail\n"); + + tmp = nv_rd32(priv, 0x400700); + nv_error(priv, "PGRAPH_STATUS 
: 0x%08x", tmp); + nouveau_bitfield_print(nv50_pgraph_status, tmp); + pr_cont("\n"); + + nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0, + nv_rd32(priv, 0x400380)); + nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1, + nv_rd32(priv, 0x400384)); + nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2, + nv_rd32(priv, 0x400388)); + } + + nv50_vm_flush_engine(&engine->base, 0x00); + + nv_mask(priv, 0x400500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&priv->lock, flags); + return timeout ? -EBUSY : 0; +} + +static const struct nouveau_enum nv50_mp_exec_error_names[] = { + { 3, "STACK_UNDERFLOW", NULL }, + { 4, "QUADON_ACTIVE", NULL }, + { 8, "TIMEOUT", NULL }, + { 0x10, "INVALID_OPCODE", NULL }, + { 0x40, "BREAKPOINT", NULL }, + {} +}; + +static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = { + { 0x00000001, "NOTIFY" }, + { 0x00000002, "IN" }, + { 0x00000004, "OUT" }, + {} +}; + +static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = { + { 0x00000001, "FAULT" }, + {} +}; + +static const struct nouveau_bitfield nv50_graph_trap_strmout[] = { + { 0x00000001, "FAULT" }, + {} +}; + +static const struct nouveau_bitfield nv50_graph_trap_ccache[] = { + { 0x00000001, "FAULT" }, + {} +}; + +/* There must be a *lot* of these. Will take some time to gather them up. */ +const struct nouveau_enum nv50_data_error_names[] = { + { 0x00000003, "INVALID_OPERATION", NULL }, + { 0x00000004, "INVALID_VALUE", NULL }, + { 0x00000005, "INVALID_ENUM", NULL }, + { 0x00000008, "INVALID_OBJECT", NULL }, + { 0x00000009, "READ_ONLY_OBJECT", NULL }, + { 0x0000000a, "SUPERVISOR_OBJECT", NULL }, + { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL }, + { 0x0000000c, "INVALID_BITFIELD", NULL }, + { 0x0000000d, "BEGIN_END_ACTIVE", NULL }, + { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL }, + { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL }, + { 0x00000010, "RT_DOUBLE_BIND", NULL }, + { 0x00000011, "RT_TYPES_MISMATCH", NULL }, + { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL }, + { 0x00000015, "FP_TOO_FEW_REGS", NULL }, + { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL }, + { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL }, + { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL }, + { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL }, + { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL }, + { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL }, + { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL }, + { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL }, + { 0x0000001f, "RT_BPP128_WITH_MS8", NULL }, + { 0x00000021, "Z_OUT_OF_BOUNDS", NULL }, + { 0x00000023, "XY_OUT_OF_BOUNDS", NULL }, + { 0x00000024, "VP_ZERO_INPUTS", NULL }, + { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL }, + { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL }, + { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL }, + { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL }, + { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL }, + { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL }, + { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL }, + { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL }, + { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL }, + { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL }, + { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL }, + { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL }, + { 0x00000046, "LAYER_ID_NEEDS_GP", NULL }, + { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL }, + { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL }, + {} +}; + +static const struct nouveau_bitfield nv50_graph_intr_name[] = { + { 0x00000001, "NOTIFY" }, + { 0x00000002, "COMPUTE_QUERY" }, + { 
0x00000010, "ILLEGAL_MTHD" }, + { 0x00000020, "ILLEGAL_CLASS" }, + { 0x00000040, "DOUBLE_NOTIFY" }, + { 0x00001000, "CONTEXT_SWITCH" }, + { 0x00010000, "BUFFER_NOTIFY" }, + { 0x00100000, "DATA_ERROR" }, + { 0x00200000, "TRAP" }, + { 0x01000000, "SINGLE_STEP" }, + {} +}; + +static void +nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display) +{ + u32 units = nv_rd32(priv, 0x1540); + u32 addr, mp10, status, pc, oplow, ophigh; + int i; + int mps = 0; + for (i = 0; i < 4; i++) { + if (!(units & 1 << (i+24))) + continue; + if (nv_device(priv)->chipset < 0xa0) + addr = 0x408200 + (tpid << 12) + (i << 7); + else + addr = 0x408100 + (tpid << 11) + (i << 7); + mp10 = nv_rd32(priv, addr + 0x10); + status = nv_rd32(priv, addr + 0x14); + if (!status) + continue; + if (display) { + nv_rd32(priv, addr + 0x20); + pc = nv_rd32(priv, addr + 0x24); + oplow = nv_rd32(priv, addr + 0x70); + ophigh = nv_rd32(priv, addr + 0x74); + nv_error(priv, "TRAP_MP_EXEC - " + "TP %d MP %d: ", tpid, i); + nouveau_enum_print(nv50_mp_exec_error_names, status); + pr_cont(" at %06x warp %d, opcode %08x %08x\n", + pc&0xffffff, pc >> 24, + oplow, ophigh); + } + nv_wr32(priv, addr + 0x10, mp10); + nv_wr32(priv, addr + 0x14, 0); + mps++; + } + if (!mps && display) + nv_error(priv, "TRAP_MP_EXEC - TP %d: " + "No MPs claiming errors?\n", tpid); +} + +static void +nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, + u32 ustatus_new, int display, const char *name) +{ + int tps = 0; + u32 units = nv_rd32(priv, 0x1540); + int i, r; + u32 ustatus_addr, ustatus; + for (i = 0; i < 16; i++) { + if (!(units & (1 << i))) + continue; + if (nv_device(priv)->chipset < 0xa0) + ustatus_addr = ustatus_old + (i << 12); + else + ustatus_addr = ustatus_new + (i << 11); + ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff; + if (!ustatus) + continue; + tps++; + switch (type) { + case 6: /* texture error... unknown for now */ + if (display) { + nv_error(priv, "magic set %d:\n", i); + for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) + nv_error(priv, "\t0x%08x: 0x%08x\n", r, + nv_rd32(priv, r)); + } + break; + case 7: /* MP error */ + if (ustatus & 0x04030000) { + nv50_priv_mp_trap(priv, i, display); + ustatus &= ~0x04030000; + } + break; + case 8: /* TPDMA error */ + { + u32 e0c = nv_rd32(priv, ustatus_addr + 4); + u32 e10 = nv_rd32(priv, ustatus_addr + 8); + u32 e14 = nv_rd32(priv, ustatus_addr + 0xc); + u32 e18 = nv_rd32(priv, ustatus_addr + 0x10); + u32 e1c = nv_rd32(priv, ustatus_addr + 0x14); + u32 e20 = nv_rd32(priv, ustatus_addr + 0x18); + u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c); + /* 2d engine destination */ + if (ustatus & 0x00000010) { + if (display) { + nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000010; + } + /* Render target */ + if (ustatus & 0x00000040) { + if (display) { + nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000040; + } + /* CUDA memory: l[], g[] or stack. */ + if (ustatus & 0x00000080) { + if (display) { + if (e18 & 0x80000000) { + /* g[] read fault? 
*/ + nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 24) & 0x1f)); + e18 &= ~0x1f000000; + } else if (e18 & 0xc) { + /* g[] write fault? */ + nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 7) & 0x1f)); + e18 &= ~0x00000f80; + } else { + nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", + i, e14, e10); + } + nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000080; + } + } + break; + } + if (ustatus) { + if (display) + nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); + } + nv_wr32(priv, ustatus_addr, 0xc0000000); + } + + if (!tps && display) + nv_warn(priv, "%s - No TPs claiming errors?\n", name); +} + +static int +nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display, + int chid, u64 inst, struct nouveau_object *engctx) +{ + u32 status = nv_rd32(priv, 0x400108); + u32 ustatus; + + if (!status && display) { + nv_error(priv, "TRAP: no units reporting traps?\n"); + return 1; + } + + /* DISPATCH: Relays commands to other units and handles NOTIFY, + * COND, QUERY. If you get a trap from it, the command is still stuck + * in DISPATCH and you need to do something about it. */ + if (status & 0x001) { + ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff; + if (!ustatus && display) { + nv_error(priv, "TRAP_DISPATCH - no ustatus?\n"); + } + + nv_wr32(priv, 0x400500, 0x00000000); + + /* Known to be triggered by screwed up NOTIFY and COND... */ + if (ustatus & 0x00000001) { + u32 addr = nv_rd32(priv, 0x400808); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 datal = nv_rd32(priv, 0x40080c); + u32 datah = nv_rd32(priv, 0x400810); + u32 class = nv_rd32(priv, 0x400814); + u32 r848 = nv_rd32(priv, 0x400848); + + nv_error(priv, "TRAP DISPATCH_FAULT\n"); + if (display && (addr & 0x80000000)) { + nv_error(priv, + "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n", + chid, inst, + nouveau_client_name(engctx), subc, + class, mthd, datah, datal, addr, r848); + } else + if (display) { + nv_error(priv, "no stuck command?\n"); + } + + nv_wr32(priv, 0x400808, 0); + nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3); + nv_wr32(priv, 0x400848, 0); + ustatus &= ~0x00000001; + } + + if (ustatus & 0x00000002) { + u32 addr = nv_rd32(priv, 0x40084c); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(priv, 0x40085c); + u32 class = nv_rd32(priv, 0x400814); + + nv_error(priv, "TRAP DISPATCH_QUERY\n"); + if (display && (addr & 0x80000000)) { + nv_error(priv, + "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n", + chid, inst, + nouveau_client_name(engctx), subc, + class, mthd, data, addr); + } else + if (display) { + nv_error(priv, "no stuck command?\n"); + } + + nv_wr32(priv, 0x40084c, 0); + ustatus &= ~0x00000002; + } + + if (ustatus && display) { + nv_error(priv, "TRAP_DISPATCH (unknown " + "0x%08x)\n", ustatus); + } + + nv_wr32(priv, 0x400804, 0xc0000000); + nv_wr32(priv, 0x400108, 0x001); + status &= ~0x001; + if (!status) + return 0; + } + + /* M2MF: Memory to memory copy engine. 
*/ + if (status & 0x002) { + u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff; + if (display) { + nv_error(priv, "TRAP_M2MF"); + nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); + pr_cont("\n"); + nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n", + nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808), + nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810)); + + } + + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(priv, 0x400040, 2); + nv_wr32(priv, 0x400040, 0); + nv_wr32(priv, 0x406800, 0xc0000000); + nv_wr32(priv, 0x400108, 0x002); + status &= ~0x002; + } + + /* VFETCH: Fetches data from vertex buffers. */ + if (status & 0x004) { + u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff; + if (display) { + nv_error(priv, "TRAP_VFETCH"); + nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); + pr_cont("\n"); + nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n", + nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08), + nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10)); + } + + nv_wr32(priv, 0x400c04, 0xc0000000); + nv_wr32(priv, 0x400108, 0x004); + status &= ~0x004; + } + + /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ + if (status & 0x008) { + ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff; + if (display) { + nv_error(priv, "TRAP_STRMOUT"); + nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); + pr_cont("\n"); + nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n", + nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808), + nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810)); + + } + + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(priv, 0x400040, 0x80); + nv_wr32(priv, 0x400040, 0); + nv_wr32(priv, 0x401800, 0xc0000000); + nv_wr32(priv, 0x400108, 0x008); + status &= ~0x008; + } + + /* CCACHE: Handles code and c[] caches and fills them. */ + if (status & 0x010) { + ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff; + if (display) { + nv_error(priv, "TRAP_CCACHE"); + nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); + pr_cont("\n"); + nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x" + " %08x %08x %08x\n", + nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004), + nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c), + nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014), + nv_rd32(priv, 0x40501c)); + + } + + nv_wr32(priv, 0x405018, 0xc0000000); + nv_wr32(priv, 0x400108, 0x010); + status &= ~0x010; + } + + /* Unknown, not seen yet... 0x402000 is the only trap status reg + * remaining, so try to handle it anyway. Perhaps related to that + * unknown DMA slot on tesla? */ + if (status & 0x20) { + ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff; + if (display) + nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus); + nv_wr32(priv, 0x402000, 0xc0000000); + /* no status modifiction on purpose */ + } + + /* TEXTURE: CUDA texturing units */ + if (status & 0x040) { + nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display, + "TRAP_TEXTURE"); + nv_wr32(priv, 0x400108, 0x040); + status &= ~0x040; + } + + /* MP: CUDA execution engines. */ + if (status & 0x080) { + nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display, + "TRAP_MP"); + nv_wr32(priv, 0x400108, 0x080); + status &= ~0x080; + } + + /* TPDMA: Handles TP-initiated uncached memory accesses: + * l[], g[], stack, 2d surfaces, render targets. 
*/ + if (status & 0x100) { + nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display, + "TRAP_TPDMA"); + nv_wr32(priv, 0x400108, 0x100); + status &= ~0x100; + } + + if (status) { + if (display) + nv_error(priv, "TRAP: unknown 0x%08x\n", status); + nv_wr32(priv, 0x400108, status); + } + + return 1; +} + +static void +nv50_graph_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nouveau_handle *handle = NULL; + struct nv50_graph_priv *priv = (void *)subdev; + u32 stat = nv_rd32(priv, 0x400100); + u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff; + u32 addr = nv_rd32(priv, 0x400704); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(priv, 0x400708); + u32 class = nv_rd32(priv, 0x400814); + u32 show = stat; + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & 0x00000010) { + handle = nouveau_handle_get_class(engctx, class); + if (handle && !nv_call(handle->object, mthd, data)) + show &= ~0x00000010; + nouveau_handle_put(handle); + } + + if (show & 0x00100000) { + u32 ecode = nv_rd32(priv, 0x400110); + nv_error(priv, "DATA_ERROR "); + nouveau_enum_print(nv50_data_error_names, ecode); + pr_cont("\n"); + } + + if (stat & 0x00200000) { + if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12, + engctx)) + show &= ~0x00200000; + } + + nv_wr32(priv, 0x400100, stat); + nv_wr32(priv, 0x400500, 0x00010001); + + if (show) { + nv_error(priv, "%s", ""); + nouveau_bitfield_print(nv50_graph_intr_name, show); + pr_cont("\n"); + nv_error(priv, + "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, (u64)inst << 12, nouveau_client_name(engctx), + subc, class, mthd, data); + } + + if (nv_rd32(priv, 0x400824) & (1 << 31)) + nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31)); + + nouveau_engctx_put(engctx); +} + +static int +nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_graph_priv *priv; + int ret; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00201000; + nv_subdev(priv)->intr = nv50_graph_intr; + nv_engine(priv)->cclass = &nv50_graph_cclass; + + priv->base.units = nv50_graph_units; + + switch (nv_device(priv)->chipset) { + case 0x50: + nv_engine(priv)->sclass = nv50_graph_sclass; + break; + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + nv_engine(priv)->sclass = nv84_graph_sclass; + break; + case 0xa0: + case 0xaa: + case 0xac: + nv_engine(priv)->sclass = nva0_graph_sclass; + break; + case 0xa3: + case 0xa5: + case 0xa8: + nv_engine(priv)->sclass = nva3_graph_sclass; + break; + case 0xaf: + nv_engine(priv)->sclass = nvaf_graph_sclass; + break; + + }; + + if (nv_device(priv)->chipset == 0x50 || + nv_device(priv)->chipset == 0xac) + nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush; + else + nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush; + + spin_lock_init(&priv->lock); + return 0; +} + +static int +nv50_graph_init(struct nouveau_object *object) +{ + struct nv50_graph_priv *priv = (void *)object; + int ret, units, i; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */ + nv_wr32(priv, 0x40008c, 
0x00000004); + + /* reset/enable traps and interrupts */ + nv_wr32(priv, 0x400804, 0xc0000000); + nv_wr32(priv, 0x406800, 0xc0000000); + nv_wr32(priv, 0x400c04, 0xc0000000); + nv_wr32(priv, 0x401800, 0xc0000000); + nv_wr32(priv, 0x405018, 0xc0000000); + nv_wr32(priv, 0x402000, 0xc0000000); + + units = nv_rd32(priv, 0x001540); + for (i = 0; i < 16; i++) { + if (!(units & (1 << i))) + continue; + + if (nv_device(priv)->chipset < 0xa0) { + nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000); + nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000); + nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000); + } else { + nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000); + nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000); + nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000); + } + } + + nv_wr32(priv, 0x400108, 0xffffffff); + nv_wr32(priv, 0x400138, 0xffffffff); + nv_wr32(priv, 0x400100, 0xffffffff); + nv_wr32(priv, 0x40013c, 0xffffffff); + nv_wr32(priv, 0x400500, 0x00010001); + + /* upload context program, initialise ctxctl defaults */ + ret = nv50_grctx_init(nv_device(priv), &priv->size); + if (ret) + return ret; + + nv_wr32(priv, 0x400824, 0x00000000); + nv_wr32(priv, 0x400828, 0x00000000); + nv_wr32(priv, 0x40082c, 0x00000000); + nv_wr32(priv, 0x400830, 0x00000000); + nv_wr32(priv, 0x40032c, 0x00000000); + nv_wr32(priv, 0x400330, 0x00000000); + + /* some unknown zcull magic */ + switch (nv_device(priv)->chipset & 0xf0) { + case 0x50: + case 0x80: + case 0x90: + nv_wr32(priv, 0x402ca8, 0x00000800); + break; + case 0xa0: + default: + nv_wr32(priv, 0x402cc0, 0x00000000); + if (nv_device(priv)->chipset == 0xa0 || + nv_device(priv)->chipset == 0xaa || + nv_device(priv)->chipset == 0xac) { + nv_wr32(priv, 0x402ca8, 0x00000802); + } else { + nv_wr32(priv, 0x402cc0, 0x00000000); + nv_wr32(priv, 0x402ca8, 0x00000002); + } + + break; + } + + /* zero out zcull regions */ + for (i = 0; i < 8; i++) { + nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000); + nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000); + nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000); + nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000); + } + return 0; +} + +struct nouveau_oclass +nv50_graph_oclass = { + .handle = NV_ENGINE(GR, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_graph_ctor, + .dtor = _nouveau_graph_dtor, + .init = nv50_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h new file mode 100644 index 0000000..0505fb4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h @@ -0,0 +1,7 @@ +#ifndef __NV50_GRAPH_H__ +#define __NV50_GRAPH_H__ + +int nv50_grctx_init(struct nouveau_device *, u32 *size); +void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c new file mode 100644 index 0000000..f9b9d82 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c @@ -0,0 +1,964 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nvc0.h" +#include "fuc/hubnvc0.fuc.h" +#include "fuc/gpcnvc0.fuc.h" + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_graph_sclass[] = { + { 0x902d, &nouveau_object_ofuncs }, + { 0x9039, &nouveau_object_ofuncs }, + { 0x9097, &nouveau_object_ofuncs }, + { 0x90c0, &nouveau_object_ofuncs }, + {} +}; + +static struct nouveau_oclass +nvc1_graph_sclass[] = { + { 0x902d, &nouveau_object_ofuncs }, + { 0x9039, &nouveau_object_ofuncs }, + { 0x9097, &nouveau_object_ofuncs }, + { 0x90c0, &nouveau_object_ofuncs }, + { 0x9197, &nouveau_object_ofuncs }, + {} +}; + +static struct nouveau_oclass +nvc8_graph_sclass[] = { + { 0x902d, &nouveau_object_ofuncs }, + { 0x9039, &nouveau_object_ofuncs }, + { 0x9097, &nouveau_object_ofuncs }, + { 0x90c0, &nouveau_object_ofuncs }, + { 0x9197, &nouveau_object_ofuncs }, + { 0x9297, &nouveau_object_ofuncs }, + {} +}; + +u64 +nvc0_graph_units(struct nouveau_graph *graph) +{ + struct nvc0_graph_priv *priv = (void *)graph; + u64 cfg; + + cfg = (u32)priv->gpc_nr; + cfg |= (u32)priv->tpc_total << 8; + cfg |= (u64)priv->rop_nr << 32; + + return cfg; +} + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +int +nvc0_graph_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *args, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_vm *vm = nouveau_client(parent)->vm; + struct nvc0_graph_priv *priv = (void *)engine; + struct nvc0_graph_data *data = priv->mmio_data; + struct nvc0_graph_mmio *mmio = priv->mmio_list; + struct nvc0_graph_chan *chan; + int ret, i; + + /* allocate memory for context, and fill with default values */ + ret = nouveau_graph_context_create(parent, engine, oclass, NULL, + priv->size, 0x100, + NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + /* allocate memory for a "mmio list" buffer that's used by the HUB + * fuc to modify some per-context register settings on first load + * of the context. 
+ */ + ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0, + &chan->mmio); + if (ret) + return ret; + + ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm, + NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, + &chan->mmio_vma); + if (ret) + return ret; + + /* allocate buffers referenced by mmio list */ + for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) { + ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size, + data->align, 0, &chan->data[i].mem); + if (ret) + return ret; + + ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access, + &chan->data[i].vma); + if (ret) + return ret; + + data++; + } + + /* finally, fill in the mmio list and point the context at it */ + for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) { + u32 addr = mmio->addr; + u32 data = mmio->data; + + if (mmio->shift) { + u64 info = chan->data[mmio->buffer].vma.offset; + data |= info >> mmio->shift; + } + + nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr); + nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data); + mmio++; + } + + for (i = 0; i < priv->size; i += 4) + nv_wo32(chan, i, priv->data[i / 4]); + + if (!priv->firmware) { + nv_wo32(chan, 0x00, chan->mmio_nr / 2); + nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8); + } else { + nv_wo32(chan, 0xf4, 0); + nv_wo32(chan, 0xf8, 0); + nv_wo32(chan, 0x10, chan->mmio_nr / 2); + nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset)); + nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset)); + nv_wo32(chan, 0x1c, 1); + nv_wo32(chan, 0x20, 0); + nv_wo32(chan, 0x28, 0); + nv_wo32(chan, 0x2c, 0); + } + + return 0; +} + +void +nvc0_graph_context_dtor(struct nouveau_object *object) +{ + struct nvc0_graph_chan *chan = (void *)object; + int i; + + for (i = 0; i < ARRAY_SIZE(chan->data); i++) { + nouveau_gpuobj_unmap(&chan->data[i].vma); + nouveau_gpuobj_ref(NULL, &chan->data[i].mem); + } + + nouveau_gpuobj_unmap(&chan->mmio_vma); + nouveau_gpuobj_ref(NULL, &chan->mmio); + + nouveau_graph_context_destroy(&chan->base); +} + +static struct nouveau_oclass +nvc0_graph_cclass = { + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_graph_context_ctor, + .dtor = nvc0_graph_context_dtor, + .init = _nouveau_graph_context_init, + .fini = _nouveau_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static void +nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base) +{ + nv_error(priv, "%06x - done 0x%08x\n", base, + nv_rd32(priv, base + 0x400)); + nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, + nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804), + nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c)); + nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base, + nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814), + nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c)); +} + +void +nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv) +{ + u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff; + u32 gpc; + + nvc0_graph_ctxctl_debug_unit(priv, 0x409000); + for (gpc = 0; gpc < gpcnr; gpc++) + nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000)); +} + +static void +nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv) +{ + u32 ustat = nv_rd32(priv, 0x409c18); + + if (ustat & 0x00000001) + nv_error(priv, "CTXCTRL ucode error\n"); + if (ustat 
& 0x00080000) + nv_error(priv, "CTXCTRL watchdog timeout\n"); + if (ustat & ~0x00080001) + nv_error(priv, "CTXCTRL 0x%08x\n", ustat); + + nvc0_graph_ctxctl_debug(priv); + nv_wr32(priv, 0x409c20, ustat); +} + +static void +nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc) +{ + u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508)); + + if (stat & 0x00000001) { + u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224)); + nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000001); + stat &= ~0x00000001; + } + + if (stat & 0x00000002) { + u32 trap0 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0644)); + u32 trap1 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x064c)); + nv_error(priv, "GPC%d/TPC%d/MP: 0x%08x 0x%08x\n", + gpc, tpc, trap0, trap1); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0644), 0x001ffffe); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x064c), 0x0000000f); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000002); + stat &= ~0x00000002; + } + + if (stat & 0x00000004) { + u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084)); + nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000004); + stat &= ~0x00000004; + } + + if (stat & 0x00000008) { + u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c)); + nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000008); + stat &= ~0x00000008; + } + + if (stat) { + nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), stat); + } +} + +static void +nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc) +{ + u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90)); + int tpc; + + if (stat & 0x00000001) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0420)); + nv_error(priv, "GPC%d/PROP: 0x%08x\n", gpc, trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000001); + stat &= ~0x00000001; + } + + if (stat & 0x00000002) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900)); + nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000002); + stat &= ~0x00000002; + } + + if (stat & 0x00000004) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028)); + nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000004); + stat &= ~0x00000004; + } + + if (stat & 0x00000008) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824)); + nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000008); + stat &= ~0x00000009; + } + + for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { + u32 mask = 0x00010000 << tpc; + if (stat & mask) { + nvc0_graph_trap_tpc(priv, gpc, tpc); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask); + stat &= ~mask; + } + } + + if (stat) { + nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), stat); + } +} + +static void +nvc0_graph_trap_intr(struct nvc0_graph_priv *priv) +{ + u32 trap = nv_rd32(priv, 0x400108); + int rop, gpc; + + if (trap & 0x00000001) { + u32 stat = nv_rd32(priv, 0x404000); + nv_error(priv, "DISPATCH 0x%08x\n", stat); 
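+		/* Each handled unit below follows the same acknowledge pattern:
+		 * reset the unit's own trap register by writing 0xc0000000,
+		 * then clear the unit's bit in the master trap status at
+		 * 0x400108 and drop it from the local copy in 'trap'.
+		 */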
+ nv_wr32(priv, 0x404000, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000001); + trap &= ~0x00000001; + } + + if (trap & 0x00000002) { + u32 stat = nv_rd32(priv, 0x404600); + nv_error(priv, "M2MF 0x%08x\n", stat); + nv_wr32(priv, 0x404600, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000002); + trap &= ~0x00000002; + } + + if (trap & 0x00000008) { + u32 stat = nv_rd32(priv, 0x408030); + nv_error(priv, "CCACHE 0x%08x\n", stat); + nv_wr32(priv, 0x408030, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000008); + trap &= ~0x00000008; + } + + if (trap & 0x00000010) { + u32 stat = nv_rd32(priv, 0x405840); + nv_error(priv, "SHADER 0x%08x\n", stat); + nv_wr32(priv, 0x405840, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000010); + trap &= ~0x00000010; + } + + if (trap & 0x00000040) { + u32 stat = nv_rd32(priv, 0x40601c); + nv_error(priv, "UNK6 0x%08x\n", stat); + nv_wr32(priv, 0x40601c, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000040); + trap &= ~0x00000040; + } + + if (trap & 0x00000080) { + u32 stat = nv_rd32(priv, 0x404490); + nv_error(priv, "MACRO 0x%08x\n", stat); + nv_wr32(priv, 0x404490, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000080); + trap &= ~0x00000080; + } + + if (trap & 0x01000000) { + u32 stat = nv_rd32(priv, 0x400118); + for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) { + u32 mask = 0x00000001 << gpc; + if (stat & mask) { + nvc0_graph_trap_gpc(priv, gpc); + nv_wr32(priv, 0x400118, mask); + stat &= ~mask; + } + } + nv_wr32(priv, 0x400108, 0x01000000); + trap &= ~0x01000000; + } + + if (trap & 0x02000000) { + for (rop = 0; rop < priv->rop_nr; rop++) { + u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070)); + u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144)); + nv_error(priv, "ROP%d 0x%08x 0x%08x\n", + rop, statz, statc); + nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); + nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); + } + nv_wr32(priv, 0x400108, 0x02000000); + trap &= ~0x02000000; + } + + if (trap) { + nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap); + nv_wr32(priv, 0x400108, trap); + } +} + +static void +nvc0_graph_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nouveau_handle *handle; + struct nvc0_graph_priv *priv = (void *)subdev; + u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff; + u32 stat = nv_rd32(priv, 0x400100); + u32 addr = nv_rd32(priv, 0x400704); + u32 mthd = (addr & 0x00003ffc); + u32 subc = (addr & 0x00070000) >> 16; + u32 data = nv_rd32(priv, 0x400708); + u32 code = nv_rd32(priv, 0x400110); + u32 class = nv_rd32(priv, 0x404200 + (subc * 4)); + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & 0x00000010) { + handle = nouveau_handle_get_class(engctx, class); + if (!handle || nv_call(handle->object, mthd, data)) { + nv_error(priv, + "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst << 12, nouveau_client_name(engctx), + subc, class, mthd, data); + } + nouveau_handle_put(handle); + nv_wr32(priv, 0x400100, 0x00000010); + stat &= ~0x00000010; + } + + if (stat & 0x00000020) { + nv_error(priv, + "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst << 12, nouveau_client_name(engctx), subc, + class, mthd, data); + nv_wr32(priv, 0x400100, 0x00000020); + stat &= ~0x00000020; + } + + if (stat & 0x00100000) { + nv_error(priv, "DATA_ERROR ["); + nouveau_enum_print(nv50_data_error_names, code); + 
pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst << 12, nouveau_client_name(engctx), subc, + class, mthd, data); + nv_wr32(priv, 0x400100, 0x00100000); + stat &= ~0x00100000; + } + + if (stat & 0x00200000) { + nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12, + nouveau_client_name(engctx)); + nvc0_graph_trap_intr(priv); + nv_wr32(priv, 0x400100, 0x00200000); + stat &= ~0x00200000; + } + + if (stat & 0x00080000) { + nvc0_graph_ctxctl_isr(priv); + nv_wr32(priv, 0x400100, 0x00080000); + stat &= ~0x00080000; + } + + if (stat) { + nv_error(priv, "unknown stat 0x%08x\n", stat); + nv_wr32(priv, 0x400100, stat); + } + + nv_wr32(priv, 0x400500, 0x00010001); + nouveau_engctx_put(engctx); +} + +int +nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname, + struct nvc0_graph_fuc *fuc) +{ + struct nouveau_device *device = nv_device(priv); + const struct firmware *fw; + char f[32]; + int ret; + + snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname); + ret = request_firmware(&fw, f, &device->pdev->dev); + if (ret) { + snprintf(f, sizeof(f), "nouveau/%s", fwname); + ret = request_firmware(&fw, f, &device->pdev->dev); + if (ret) { + nv_error(priv, "failed to load %s\n", fwname); + return ret; + } + } + + fuc->size = fw->size; + fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL); + release_firmware(fw); + return (fuc->data != NULL) ? 0 : -ENOMEM; +} + +static int +nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nvc0_graph_priv *priv; + bool enable = device->chipset != 0xd7; + int ret, i; + + ret = nouveau_graph_create(parent, engine, oclass, enable, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x18001000; + nv_subdev(priv)->intr = nvc0_graph_intr; + nv_engine(priv)->cclass = &nvc0_graph_cclass; + + priv->base.units = nvc0_graph_units; + + if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { + nv_info(priv, "using external firmware\n"); + if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || + nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) || + nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) || + nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad)) + return -EINVAL; + priv->firmware = true; + } + + switch (nvc0_graph_class(priv)) { + case 0x9097: + nv_engine(priv)->sclass = nvc0_graph_sclass; + break; + case 0x9197: + nv_engine(priv)->sclass = nvc1_graph_sclass; + break; + case 0x9297: + nv_engine(priv)->sclass = nvc8_graph_sclass; + break; + } + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0, + &priv->unk4188b4); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0, + &priv->unk4188b8); + if (ret) + return ret; + + for (i = 0; i < 0x1000; i += 4) { + nv_wo32(priv->unk4188b4, i, 0x00000010); + nv_wo32(priv->unk4188b8, i, 0x00000010); + } + + priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16; + priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f; + for (i = 0; i < priv->gpc_nr; i++) { + priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); + priv->tpc_total += priv->tpc_nr[i]; + } + + /*XXX: these need figuring out... 
though it might not even matter */ + switch (nv_device(priv)->chipset) { + case 0xc0: + if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */ + priv->magic_not_rop_nr = 0x07; + } else + if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */ + priv->magic_not_rop_nr = 0x05; + } else + if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */ + priv->magic_not_rop_nr = 0x06; + } + break; + case 0xc3: /* 450, 4/0/0/0, 2 */ + priv->magic_not_rop_nr = 0x03; + break; + case 0xc4: /* 460, 3/4/0/0, 4 */ + priv->magic_not_rop_nr = 0x01; + break; + case 0xc1: /* 2/0/0/0, 1 */ + priv->magic_not_rop_nr = 0x01; + break; + case 0xc8: /* 4/4/3/4, 5 */ + priv->magic_not_rop_nr = 0x06; + break; + case 0xce: /* 4/4/0/0, 4 */ + priv->magic_not_rop_nr = 0x03; + break; + case 0xcf: /* 4/0/0/0, 3 */ + priv->magic_not_rop_nr = 0x03; + break; + case 0xd9: /* 1/0/0/0, 1 */ + priv->magic_not_rop_nr = 0x01; + break; + } + + return 0; +} + +static void +nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc) +{ + kfree(fuc->data); + fuc->data = NULL; +} + +void +nvc0_graph_dtor(struct nouveau_object *object) +{ + struct nvc0_graph_priv *priv = (void *)object; + + kfree(priv->data); + + nvc0_graph_dtor_fw(&priv->fuc409c); + nvc0_graph_dtor_fw(&priv->fuc409d); + nvc0_graph_dtor_fw(&priv->fuc41ac); + nvc0_graph_dtor_fw(&priv->fuc41ad); + + nouveau_gpuobj_ref(NULL, &priv->unk4188b8); + nouveau_gpuobj_ref(NULL, &priv->unk4188b4); + + nouveau_graph_destroy(&priv->base); +} + +static void +nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv) +{ + int i; + + nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000); + nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000); + for (i = 0; i < 4; i++) + nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000); + nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); + nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); +} + +static void +nvc0_graph_init_regs(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x400080, 0x003083c2); + nv_wr32(priv, 0x400088, 0x00006fe7); + nv_wr32(priv, 0x40008c, 0x00000000); + nv_wr32(priv, 0x400090, 0x00000030); + nv_wr32(priv, 0x40013c, 0x013901f7); + nv_wr32(priv, 0x400140, 0x00000100); + nv_wr32(priv, 0x400144, 0x00000000); + nv_wr32(priv, 0x400148, 0x00000110); + nv_wr32(priv, 0x400138, 0x00000000); + nv_wr32(priv, 0x400130, 0x00000000); + nv_wr32(priv, 0x400134, 0x00000000); + nv_wr32(priv, 0x400124, 0x00000002); +} + +static void +nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv) +{ + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); + u32 data[TPC_MAX / 8]; + u8 tpcnr[GPC_MAX]; + int i, gpc, tpc; + + nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */ + + /* + * TP ROP UNKVAL(magic_not_rop_nr) + * 450: 4/0/0/0 2 3 + * 460: 3/4/0/0 4 1 + * 465: 3/4/4/0 4 7 + * 470: 3/3/4/4 5 5 + * 480: 3/4/4/4 6 6 + */ + + memset(data, 0x00, sizeof(data)); + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + for (i = 0, gpc = -1; i < priv->tpc_total; i++) { + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; + + data[i / 8] |= tpc << ((i % 8) * 4); + } + + nv_wr32(priv, GPC_BCAST(0x0980), data[0]); + nv_wr32(priv, GPC_BCAST(0x0984), data[1]); + nv_wr32(priv, GPC_BCAST(0x0988), data[2]); + nv_wr32(priv, GPC_BCAST(0x098c), data[3]); + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | + priv->tpc_nr[gpc]); + nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total); + nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); + 
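+ /*
+ * 0x0914 pairs the chipset-specific magic_not_rop_nr chosen in the
+ * constructor with this GPC's own TPC count, 0x0910 carries the total
+ * TPC count, and 0x0918 takes magicgpc918, i.e. the
+ * DIV_ROUND_UP(0x00800000, tpc_total) value computed above --
+ * presumably a fixed-point 1/tpc_total used for work distribution;
+ * the exact hardware meaning is not documented here.
+ */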
} + + nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918); + nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); +} + +static void +nvc0_graph_init_units(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x409c24, 0x000f0000); + nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */ + nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */ + nv_wr32(priv, 0x408030, 0xc0000000); + nv_wr32(priv, 0x40601c, 0xc0000000); + nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */ + nv_wr32(priv, 0x406018, 0xc0000000); + nv_wr32(priv, 0x405840, 0xc0000000); + nv_wr32(priv, 0x405844, 0x00ffffff); + nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); + nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000); +} + +static void +nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv) +{ + int gpc, tpc; + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); + for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); + } + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff); + } +} + +static void +nvc0_graph_init_rop(struct nvc0_graph_priv *priv) +{ + int rop; + + for (rop = 0; rop < priv->rop_nr; rop++) { + nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); + nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); + nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff); + nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff); + } +} + +void +nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base, + struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data) +{ + int i; + + nv_wr32(priv, fuc_base + 0x01c0, 0x01000000); + for (i = 0; i < data->size / 4; i++) + nv_wr32(priv, fuc_base + 0x01c4, data->data[i]); + + nv_wr32(priv, fuc_base + 0x0180, 0x01000000); + for (i = 0; i < code->size / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, fuc_base + 0x0188, i >> 6); + nv_wr32(priv, fuc_base + 0x0184, code->data[i]); + } +} + +static int +nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) +{ + u32 r000260; + int i; + + if (priv->firmware) { + /* load fuc microcode */ + r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, + &priv->fuc409d); + nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, + &priv->fuc41ad); + nv_wr32(priv, 0x000260, r000260); + + /* start both of them running */ + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x41a10c, 0x00000000); + nv_wr32(priv, 0x40910c, 0x00000000); + nv_wr32(priv, 0x41a100, 0x00000002); + nv_wr32(priv, 0x409100, 0x00000002); + if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) + nv_warn(priv, "0x409800 wait failed\n"); + + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x7fffffff); + nv_wr32(priv, 0x409504, 0x00000021); + + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x00000000); + nv_wr32(priv, 0x409504, 0x00000010); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x10 timeout\n"); + return -EBUSY; + } + priv->size = nv_rd32(priv, 0x409800); + + 
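+ /*
+ * Request 0x10 above returned the context image size in 0x409800
+ * (stored in priv->size and consumed by nvc0_grctx_generate() below);
+ * requests 0x16 and 0x25 below use the same doorbell/status pair
+ * (0x409504 / 0x409800), but their exact meaning is not documented
+ * here.
+ */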
nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x00000000); + nv_wr32(priv, 0x409504, 0x00000016); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x16 timeout\n"); + return -EBUSY; + } + + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x00000000); + nv_wr32(priv, 0x409504, 0x00000025); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x25 timeout\n"); + return -EBUSY; + } + + if (priv->data == NULL) { + int ret = nvc0_grctx_generate(priv); + if (ret) { + nv_error(priv, "failed to construct context\n"); + return ret; + } + } + + return 0; + } + + /* load HUB microcode */ + r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nv_wr32(priv, 0x4091c0, 0x01000000); + for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++) + nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]); + + nv_wr32(priv, 0x409180, 0x01000000); + for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x409188, i >> 6); + nv_wr32(priv, 0x409184, nvc0_grhub_code[i]); + } + + /* load GPC microcode */ + nv_wr32(priv, 0x41a1c0, 0x01000000); + for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++) + nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]); + + nv_wr32(priv, 0x41a180, 0x01000000); + for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x41a188, i >> 6); + nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]); + } + nv_wr32(priv, 0x000260, r000260); + + /* start HUB ucode running, it'll init the GPCs */ + nv_wr32(priv, 0x409800, nv_device(priv)->chipset); + nv_wr32(priv, 0x40910c, 0x00000000); + nv_wr32(priv, 0x409100, 0x00000002); + if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) { + nv_error(priv, "HUB_INIT timed out\n"); + nvc0_graph_ctxctl_debug(priv); + return -EBUSY; + } + + priv->size = nv_rd32(priv, 0x409804); + if (priv->data == NULL) { + int ret = nvc0_grctx_generate(priv); + if (ret) { + nv_error(priv, "failed to construct context\n"); + return ret; + } + } + + return 0; +} + +static int +nvc0_graph_init(struct nouveau_object *object) +{ + struct nvc0_graph_priv *priv = (void *)object; + int ret; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + nvc0_graph_init_obj418880(priv); + nvc0_graph_init_regs(priv); + /*nvc0_graph_init_unitplemented_magics(priv);*/ + nvc0_graph_init_gpc_0(priv); + /*nvc0_graph_init_unitplemented_c242(priv);*/ + + nv_wr32(priv, 0x400500, 0x00010001); + nv_wr32(priv, 0x400100, 0xffffffff); + nv_wr32(priv, 0x40013c, 0xffffffff); + + nvc0_graph_init_units(priv); + nvc0_graph_init_gpc_1(priv); + nvc0_graph_init_rop(priv); + + nv_wr32(priv, 0x400108, 0xffffffff); + nv_wr32(priv, 0x400138, 0xffffffff); + nv_wr32(priv, 0x400118, 0xffffffff); + nv_wr32(priv, 0x400130, 0xffffffff); + nv_wr32(priv, 0x40011c, 0xffffffff); + nv_wr32(priv, 0x400134, 0xffffffff); + nv_wr32(priv, 0x400054, 0x34ce3464); + + ret = nvc0_graph_init_ctxctl(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nvc0_graph_oclass = { + .handle = NV_ENGINE(GR, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_graph_ctor, + .dtor = nvc0_graph_dtor, + .init = nvc0_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h new file mode 100644 index 0000000..c870dad --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h @@ -0,0 +1,175 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifndef __NVC0_GRAPH_H__ +#define __NVC0_GRAPH_H__ + +#include <core/client.h> +#include <core/handle.h> +#include <core/gpuobj.h> +#include <core/option.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/bar.h> +#include <subdev/timer.h> + +#include <engine/fifo.h> +#include <engine/graph.h> + +#define GPC_MAX 4 +#define TPC_MAX 32 + +#define ROP_BCAST(r) (0x408800 + (r)) +#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r)) +#define GPC_BCAST(r) (0x418000 + (r)) +#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r)) +#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r)) + +struct nvc0_graph_data { + u32 size; + u32 align; + u32 access; +}; + +struct nvc0_graph_mmio { + u32 addr; + u32 data; + u32 shift; + u32 buffer; +}; + +struct nvc0_graph_fuc { + u32 *data; + u32 size; +}; + +struct nvc0_graph_priv { + struct nouveau_graph base; + + struct nvc0_graph_fuc fuc409c; + struct nvc0_graph_fuc fuc409d; + struct nvc0_graph_fuc fuc41ac; + struct nvc0_graph_fuc fuc41ad; + bool firmware; + + u8 rop_nr; + u8 gpc_nr; + u8 tpc_nr[GPC_MAX]; + u8 tpc_total; + + struct nouveau_gpuobj *unk4188b4; + struct nouveau_gpuobj *unk4188b8; + + struct nvc0_graph_data mmio_data[4]; + struct nvc0_graph_mmio mmio_list[4096/8]; + u32 size; + u32 *data; + + u8 magic_not_rop_nr; +}; + +struct nvc0_graph_chan { + struct nouveau_graph_chan base; + + struct nouveau_gpuobj *mmio; + struct nouveau_vma mmio_vma; + int mmio_nr; + struct { + struct nouveau_gpuobj *mem; + struct nouveau_vma vma; + } data[4]; +}; + +static inline u32 +nvc0_graph_class(void *obj) +{ + struct nouveau_device *device = nv_device(obj); + + switch (device->chipset) { + case 0xc0: + case 0xc3: + case 0xc4: + case 0xce: /* guess, mmio trace shows only 0x9097 state */ + case 0xcf: /* guess, mmio trace shows only 0x9097 state */ + return 0x9097; + case 0xc1: + return 0x9197; + case 0xc8: + case 0xd9: + case 0xd7: + return 0x9297; + case 0xe4: + case 0xe7: + case 0xe6: + return 0xa097; + default: + return 0; + } +} + +void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data); + +static inline void +nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data) +{ + nv_wr32(priv, 0x40448c, data); + nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class); +} + +struct nvc0_grctx { + struct nvc0_graph_priv *priv; + struct nvc0_graph_data *data; + struct nvc0_graph_mmio *mmio; + struct 
nouveau_gpuobj *chan; + int buffer_nr; + u64 buffer[4]; + u64 addr; +}; + +int nvc0_grctx_generate(struct nvc0_graph_priv *); +int nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *); +void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32); +void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32); +int nvc0_grctx_fini(struct nvc0_grctx *); + +int nve0_grctx_generate(struct nvc0_graph_priv *); + +#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p)) +#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b)) + +void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *); +int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *, + struct nvc0_graph_fuc *); +void nvc0_graph_dtor(struct nouveau_object *); +void nvc0_graph_init_fw(struct nvc0_graph_priv *, u32 base, + struct nvc0_graph_fuc *, struct nvc0_graph_fuc *); +int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +void nvc0_graph_context_dtor(struct nouveau_object *); + +u64 nvc0_graph_units(struct nouveau_graph *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c new file mode 100644 index 0000000..678c16f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c @@ -0,0 +1,807 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "nvc0.h" +#include "fuc/hubnve0.fuc.h" +#include "fuc/gpcnve0.fuc.h" + +/******************************************************************************* + * Graphics object classes + ******************************************************************************/ + +static struct nouveau_oclass +nve0_graph_sclass[] = { + { 0x902d, &nouveau_object_ofuncs }, + { 0xa040, &nouveau_object_ofuncs }, + { 0xa097, &nouveau_object_ofuncs }, + { 0xa0c0, &nouveau_object_ofuncs }, + { 0xa0b5, &nouveau_object_ofuncs }, + {} +}; + +/******************************************************************************* + * PGRAPH context + ******************************************************************************/ + +static struct nouveau_oclass +nve0_graph_cclass = { + .handle = NV_ENGCTX(GR, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_graph_context_ctor, + .dtor = nvc0_graph_context_dtor, + .init = _nouveau_graph_context_init, + .fini = _nouveau_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static void +nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv) +{ + u32 ustat = nv_rd32(priv, 0x409c18); + + if (ustat & 0x00000001) + nv_error(priv, "CTXCTRL ucode error\n"); + if (ustat & 0x00080000) + nv_error(priv, "CTXCTRL watchdog timeout\n"); + if (ustat & ~0x00080001) + nv_error(priv, "CTXCTRL 0x%08x\n", ustat); + + nvc0_graph_ctxctl_debug(priv); + nv_wr32(priv, 0x409c20, ustat); +} + +static const struct nouveau_enum nve0_mp_warp_error[] = { + { 0x00, "NO_ERROR" }, + { 0x01, "STACK_MISMATCH" }, + { 0x05, "MISALIGNED_PC" }, + { 0x08, "MISALIGNED_GPR" }, + { 0x09, "INVALID_OPCODE" }, + { 0x0d, "GPR_OUT_OF_BOUNDS" }, + { 0x0e, "MEM_OUT_OF_BOUNDS" }, + { 0x0f, "UNALIGNED_MEM_ACCESS" }, + { 0x11, "INVALID_PARAM" }, + {} +}; + +static const struct nouveau_enum nve0_mp_global_error[] = { + { 2, "MULTIPLE_WARP_ERRORS" }, + { 3, "OUT_OF_STACK_SPACE" }, + {} +}; + +static const struct nouveau_enum nve0_gpc_rop_error[] = { + { 1, "RT_PITCH_OVERRUN" }, + { 4, "RT_WIDTH_OVERRUN" }, + { 5, "RT_HEIGHT_OVERRUN" }, + { 7, "ZETA_STORAGE_TYPE_MISMATCH" }, + { 8, "RT_STORAGE_TYPE_MISMATCH" }, + { 10, "RT_LINEAR_MISMATCH" }, + {} +}; + +static const struct nouveau_enum nve0_sked_error[] = { + { 7, "CONSTANT_BUFFER_SIZE" }, + { 9, "LOCAL_MEMORY_SIZE_POS" }, + { 10, "LOCAL_MEMORY_SIZE_NEG" }, + { 11, "WARP_CSTACK_SIZE" }, + { 12, "TOTAL_TEMP_SIZE" }, + { 13, "REGISTER_COUNT" }, + { 18, "TOTAL_THREADS" }, + { 20, "PROGRAM_OFFSET" }, + { 21, "SHARED_MEMORY_SIZE" }, + { 25, "SHARED_CONFIG_TOO_SMALL" }, + { 26, "TOTAL_REGISTER_COUNT" }, + {} +}; + +static void +nve0_graph_mp_trap(struct nvc0_graph_priv *priv, int gpc, int tp) +{ + int i; + u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x648)); + u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x650)); + + nv_error(priv, "GPC%i/TP%i/MP trap:", gpc, tp); + + for (i = 0; i <= 31; ++i) { + if (!(gerr & (1 << i))) + continue; + pr_cont(" "); + nouveau_enum_print(nve0_mp_global_error, i); + } + if (werr) { + pr_cont(" "); + nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff); + } + pr_cont("\n"); + + /* disable MP trap to avoid spam */ + nv_mask(priv, TPC_UNIT(gpc, tp, 0x50c), 0x2, 0x0); + + /* TODO: figure out how to resume after an MP trap */ +} + +static void 
+nve0_graph_tp_trap(struct nvc0_graph_priv *priv, int gpc, int tp) +{ + u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x508)); + + if (stat & 0x1) { + u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x224)); + nv_error(priv, "GPC%i/TP%i/TEX trap: %08x\n", + gpc, tp, trap); + + nv_wr32(priv, TPC_UNIT(gpc, tp, 0x224), 0xc0000000); + stat &= ~0x1; + } + + if (stat & 0x2) { + nve0_graph_mp_trap(priv, gpc, tp); + stat &= ~0x2; + } + + if (stat & 0x4) { + u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x084)); + nv_error(priv, "GPC%i/TP%i/POLY trap: %08x\n", + gpc, tp, trap); + + nv_wr32(priv, TPC_UNIT(gpc, tp, 0x084), 0xc0000000); + stat &= ~0x4; + } + + if (stat & 0x8) { + u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x48c)); + nv_error(priv, "GPC%i/TP%i/L1C trap: %08x\n", + gpc, tp, trap); + + nv_wr32(priv, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000); + stat &= ~0x8; + } + + if (stat) { + nv_error(priv, "GPC%i/TP%i: unknown stat %08x\n", + gpc, tp, stat); + } +} + +static void +nve0_graph_gpc_trap(struct nvc0_graph_priv *priv) +{ + const u32 mask = nv_rd32(priv, 0x400118); + int gpc; + + for (gpc = 0; gpc < 4; ++gpc) { + u32 stat; + int tp; + + if (!(mask & (1 << gpc))) + continue; + stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90)); + + if (stat & 0x0001) { + u32 trap[4]; + int i; + + trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420)); + trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434)); + trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438)); + trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c)); + + nv_error(priv, "GPC%i/PROP trap:", gpc); + for (i = 0; i <= 29; ++i) { + if (!(trap[0] & (1 << i))) + continue; + pr_cont(" "); + nouveau_enum_print(nve0_gpc_rop_error, i); + } + pr_cont("\n"); + + nv_error(priv, "x = %u, y = %u, " + "format = %x, storage type = %x\n", + trap[1] & 0xffff, + trap[1] >> 16, + (trap[2] >> 8) & 0x3f, + trap[3] & 0xff); + + nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); + stat &= ~0x0001; + } + + if (stat & 0x0002) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900)); + nv_error(priv, "GPC%i/ZCULL trap: %08x\n", gpc, + trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); + stat &= ~0x0002; + } + + if (stat & 0x0004) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028)); + nv_error(priv, "GPC%i/CCACHE trap: %08x\n", gpc, + trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); + stat &= ~0x0004; + } + + if (stat & 0x0008) { + u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824)); + nv_error(priv, "GPC%i/ESETUP trap %08x\n", gpc, + trap); + nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); + stat &= ~0x0008; + } + + for (tp = 0; tp < 8; ++tp) { + if (stat & (1 << (16 + tp))) + nve0_graph_tp_trap(priv, gpc, tp); + } + stat &= ~0xff0000; + + if (stat) { + nv_error(priv, "GPC%i: unknown stat %08x\n", + gpc, stat); + } + } +} + + +static void +nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst, + struct nouveau_object *engctx) +{ + u32 trap = nv_rd32(priv, 0x400108); + int i; + int rop; + + if (trap & 0x00000001) { + u32 stat = nv_rd32(priv, 0x404000); + nv_error(priv, "DISPATCH ch %d [0x%010llx %s] 0x%08x\n", + chid, inst, nouveau_client_name(engctx), stat); + nv_wr32(priv, 0x404000, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000001); + trap &= ~0x00000001; + } + + if (trap & 0x00000010) { + u32 stat = nv_rd32(priv, 0x405840); + nv_error(priv, "SHADER ch %d [0x%010llx %s] 0x%08x\n", + chid, inst, nouveau_client_name(engctx), stat); + nv_wr32(priv, 0x405840, 0xc0000000); + nv_wr32(priv, 0x400108, 0x00000010); + trap &= ~0x00000010; + } + + if (trap & 0x00000100) { + u32 stat = nv_rd32(priv, 
0x407020); + nv_error(priv, "SKED ch %d [0x%010llx %s]:", + chid, inst, nouveau_client_name(engctx)); + + for (i = 0; i <= 29; ++i) { + if (!(stat & (1 << i))) + continue; + pr_cont(" "); + nouveau_enum_print(nve0_sked_error, i); + } + pr_cont("\n"); + + if (stat & 0x3fffffff) + nv_wr32(priv, 0x407020, 0x40000000); + nv_wr32(priv, 0x400108, 0x00000100); + trap &= ~0x00000100; + } + + if (trap & 0x01000000) { + nv_error(priv, "GPC ch %d [0x%010llx %s]:\n", + chid, inst, nouveau_client_name(engctx)); + nve0_graph_gpc_trap(priv); + trap &= ~0x01000000; + } + + if (trap & 0x02000000) { + for (rop = 0; rop < priv->rop_nr; rop++) { + u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070)); + u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144)); + nv_error(priv, + "ROP%d ch %d [0x%010llx %s] 0x%08x 0x%08x\n", + rop, chid, inst, nouveau_client_name(engctx), + statz, statc); + nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); + nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); + } + nv_wr32(priv, 0x400108, 0x02000000); + trap &= ~0x02000000; + } + + if (trap) { + nv_error(priv, "TRAP ch %d [0x%010llx %s] 0x%08x\n", + chid, inst, nouveau_client_name(engctx), trap); + nv_wr32(priv, 0x400108, trap); + } +} + +static void +nve0_graph_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nouveau_handle *handle; + struct nvc0_graph_priv *priv = (void *)subdev; + u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff; + u32 stat = nv_rd32(priv, 0x400100); + u32 addr = nv_rd32(priv, 0x400704); + u32 mthd = (addr & 0x00003ffc); + u32 subc = (addr & 0x00070000) >> 16; + u32 data = nv_rd32(priv, 0x400708); + u32 code = nv_rd32(priv, 0x400110); + u32 class = nv_rd32(priv, 0x404200 + (subc * 4)); + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & 0x00000010) { + handle = nouveau_handle_get_class(engctx, class); + if (!handle || nv_call(handle->object, mthd, data)) { + nv_error(priv, + "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, nouveau_client_name(engctx), subc, + class, mthd, data); + } + nouveau_handle_put(handle); + nv_wr32(priv, 0x400100, 0x00000010); + stat &= ~0x00000010; + } + + if (stat & 0x00000020) { + nv_error(priv, + "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, nouveau_client_name(engctx), subc, class, + mthd, data); + nv_wr32(priv, 0x400100, 0x00000020); + stat &= ~0x00000020; + } + + if (stat & 0x00100000) { + nv_error(priv, "DATA_ERROR ["); + nouveau_enum_print(nv50_data_error_names, code); + pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, nouveau_client_name(engctx), subc, class, + mthd, data); + nv_wr32(priv, 0x400100, 0x00100000); + stat &= ~0x00100000; + } + + if (stat & 0x00200000) { + nve0_graph_trap_isr(priv, chid, inst, engctx); + nv_wr32(priv, 0x400100, 0x00200000); + stat &= ~0x00200000; + } + + if (stat & 0x00080000) { + nve0_graph_ctxctl_isr(priv); + nv_wr32(priv, 0x400100, 0x00080000); + stat &= ~0x00080000; + } + + if (stat) { + nv_error(priv, "unknown stat 0x%08x\n", stat); + nv_wr32(priv, 0x400100, stat); + } + + nv_wr32(priv, 0x400500, 0x00010001); + nouveau_engctx_put(engctx); +} + +static int +nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) 
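+/*
+ * NVE0 (Kepler) PGRAPH constructor; it mirrors the NVC0 version above:
+ * optional external ctxctl firmware via the "NvGrUseFW" option, two
+ * 0x1000-byte scratch objects apparently bound at 0x4188b4/0x4188b8,
+ * and GPC/TPC/ROP counts probed from 0x409604 and GPC_UNIT(i, 0x2608).
+ */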
+{ + struct nouveau_device *device = nv_device(parent); + struct nvc0_graph_priv *priv; + int ret, i; + + ret = nouveau_graph_create(parent, engine, oclass, true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x18001000; + nv_subdev(priv)->intr = nve0_graph_intr; + nv_engine(priv)->cclass = &nve0_graph_cclass; + nv_engine(priv)->sclass = nve0_graph_sclass; + + priv->base.units = nvc0_graph_units; + + if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) { + nv_info(priv, "using external firmware\n"); + if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) || + nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) || + nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) || + nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad)) + return -EINVAL; + priv->firmware = true; + } + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0, + &priv->unk4188b4); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0, + &priv->unk4188b8); + if (ret) + return ret; + + for (i = 0; i < 0x1000; i += 4) { + nv_wo32(priv->unk4188b4, i, 0x00000010); + nv_wo32(priv->unk4188b8, i, 0x00000010); + } + + priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f; + priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16; + for (i = 0; i < priv->gpc_nr; i++) { + priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); + priv->tpc_total += priv->tpc_nr[i]; + } + + switch (nv_device(priv)->chipset) { + case 0xe4: + if (priv->tpc_total == 8) + priv->magic_not_rop_nr = 3; + else + if (priv->tpc_total == 7) + priv->magic_not_rop_nr = 1; + break; + case 0xe7: + case 0xe6: + priv->magic_not_rop_nr = 1; + break; + default: + break; + } + + return 0; +} + +static void +nve0_graph_init_obj418880(struct nvc0_graph_priv *priv) +{ + int i; + + nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000); + nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000); + for (i = 0; i < 4; i++) + nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000); + nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8); + nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8); +} + +static void +nve0_graph_init_regs(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x400080, 0x003083c2); + nv_wr32(priv, 0x400088, 0x0001ffe7); + nv_wr32(priv, 0x40008c, 0x00000000); + nv_wr32(priv, 0x400090, 0x00000030); + nv_wr32(priv, 0x40013c, 0x003901f7); + nv_wr32(priv, 0x400140, 0x00000100); + nv_wr32(priv, 0x400144, 0x00000000); + nv_wr32(priv, 0x400148, 0x00000110); + nv_wr32(priv, 0x400138, 0x00000000); + nv_wr32(priv, 0x400130, 0x00000000); + nv_wr32(priv, 0x400134, 0x00000000); + nv_wr32(priv, 0x400124, 0x00000002); +} + +static void +nve0_graph_init_units(struct nvc0_graph_priv *priv) +{ + nv_wr32(priv, 0x409ffc, 0x00000000); + nv_wr32(priv, 0x409c14, 0x00003e3e); + nv_wr32(priv, 0x409c24, 0x000f0000); + + nv_wr32(priv, 0x404000, 0xc0000000); + nv_wr32(priv, 0x404600, 0xc0000000); + nv_wr32(priv, 0x408030, 0xc0000000); + nv_wr32(priv, 0x404490, 0xc0000000); + nv_wr32(priv, 0x406018, 0xc0000000); + nv_wr32(priv, 0x407020, 0xc0000000); + nv_wr32(priv, 0x405840, 0xc0000000); + nv_wr32(priv, 0x405844, 0x00ffffff); + + nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); + nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000); + +} + +static void +nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv) +{ + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); + u32 data[TPC_MAX / 8]; + u8 tpcnr[GPC_MAX]; + int i, gpc, tpc; + + nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001); + + 
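+ /*
+ * Distribute TPCs across GPCs round-robin: each iteration below picks
+ * the next GPC that still has TPCs left and packs that GPC's local TPC
+ * index into data[] (4 bits per slot, 8 slots per word), which is then
+ * written to the broadcast registers 0x0980-0x098c. For example, in a
+ * hypothetical config with two GPCs carrying 4 and 3 TPCs, the slots
+ * come out 0,0,1,1,2,2,3 and data[0] ends up 0x03221100.
+ */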
memset(data, 0x00, sizeof(data)); + memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr)); + for (i = 0, gpc = -1; i < priv->tpc_total; i++) { + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpcnr[gpc]); + tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--; + + data[i / 8] |= tpc << ((i % 8) * 4); + } + + nv_wr32(priv, GPC_BCAST(0x0980), data[0]); + nv_wr32(priv, GPC_BCAST(0x0984), data[1]); + nv_wr32(priv, GPC_BCAST(0x0988), data[2]); + nv_wr32(priv, GPC_BCAST(0x098c), data[3]); + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | + priv->tpc_nr[gpc]); + nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total); + nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); + } + + nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918); + nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); +} + +static void +nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv) +{ + int gpc, tpc; + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000); + nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000); + for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe); + nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f); + } + nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff); + nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff); + } +} + +static void +nve0_graph_init_rop(struct nvc0_graph_priv *priv) +{ + int rop; + + for (rop = 0; rop < priv->rop_nr; rop++) { + nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); + nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); + nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff); + nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff); + } +} + +static int +nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv) +{ + u32 r000260; + int i; + + if (priv->firmware) { + /* load fuc microcode */ + r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d); + nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad); + nv_wr32(priv, 0x000260, r000260); + + /* start both of them running */ + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x41a10c, 0x00000000); + nv_wr32(priv, 0x40910c, 0x00000000); + nv_wr32(priv, 0x41a100, 0x00000002); + nv_wr32(priv, 0x409100, 0x00000002); + if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) + nv_error(priv, "0x409800 wait failed\n"); + + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x7fffffff); + nv_wr32(priv, 0x409504, 0x00000021); + + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x00000000); + nv_wr32(priv, 0x409504, 0x00000010); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x10 timeout\n"); + return -EBUSY; + } + priv->size = nv_rd32(priv, 0x409800); + + nv_wr32(priv, 0x409840, 0xffffffff); + nv_wr32(priv, 0x409500, 0x00000000); + nv_wr32(priv, 0x409504, 0x00000016); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x16 timeout\n"); + return -EBUSY; + } + + nv_wr32(priv, 0x409840, 0xffffffff); 
+ nv_wr32(priv, 0x409500, 0x00000000); + nv_wr32(priv, 0x409504, 0x00000025); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x25 timeout\n"); + return -EBUSY; + } + + nv_wr32(priv, 0x409800, 0x00000000); + nv_wr32(priv, 0x409500, 0x00000001); + nv_wr32(priv, 0x409504, 0x00000030); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x30 timeout\n"); + return -EBUSY; + } + + nv_wr32(priv, 0x409810, 0xb00095c8); + nv_wr32(priv, 0x409800, 0x00000000); + nv_wr32(priv, 0x409500, 0x00000001); + nv_wr32(priv, 0x409504, 0x00000031); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x31 timeout\n"); + return -EBUSY; + } + + nv_wr32(priv, 0x409810, 0x00080420); + nv_wr32(priv, 0x409800, 0x00000000); + nv_wr32(priv, 0x409500, 0x00000001); + nv_wr32(priv, 0x409504, 0x00000032); + if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) { + nv_error(priv, "fuc09 req 0x32 timeout\n"); + return -EBUSY; + } + + nv_wr32(priv, 0x409614, 0x00000070); + nv_wr32(priv, 0x409614, 0x00000770); + nv_wr32(priv, 0x40802c, 0x00000001); + + if (priv->data == NULL) { + int ret = nve0_grctx_generate(priv); + if (ret) { + nv_error(priv, "failed to construct context\n"); + return ret; + } + } + + return 0; + } + + /* load HUB microcode */ + r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nv_wr32(priv, 0x4091c0, 0x01000000); + for (i = 0; i < sizeof(nve0_grhub_data) / 4; i++) + nv_wr32(priv, 0x4091c4, nve0_grhub_data[i]); + + nv_wr32(priv, 0x409180, 0x01000000); + for (i = 0; i < sizeof(nve0_grhub_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x409188, i >> 6); + nv_wr32(priv, 0x409184, nve0_grhub_code[i]); + } + + /* load GPC microcode */ + nv_wr32(priv, 0x41a1c0, 0x01000000); + for (i = 0; i < sizeof(nve0_grgpc_data) / 4; i++) + nv_wr32(priv, 0x41a1c4, nve0_grgpc_data[i]); + + nv_wr32(priv, 0x41a180, 0x01000000); + for (i = 0; i < sizeof(nve0_grgpc_code) / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(priv, 0x41a188, i >> 6); + nv_wr32(priv, 0x41a184, nve0_grgpc_code[i]); + } + nv_wr32(priv, 0x000260, r000260); + + /* start HUB ucode running, it'll init the GPCs */ + nv_wr32(priv, 0x409800, nv_device(priv)->chipset); + nv_wr32(priv, 0x40910c, 0x00000000); + nv_wr32(priv, 0x409100, 0x00000002); + if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) { + nv_error(priv, "HUB_INIT timed out\n"); + nvc0_graph_ctxctl_debug(priv); + return -EBUSY; + } + + priv->size = nv_rd32(priv, 0x409804); + if (priv->data == NULL) { + int ret = nve0_grctx_generate(priv); + if (ret) { + nv_error(priv, "failed to construct context\n"); + return ret; + } + } + + return 0; +} + +static int +nve0_graph_init(struct nouveau_object *object) +{ + struct nvc0_graph_priv *priv = (void *)object; + int ret; + + ret = nouveau_graph_init(&priv->base); + if (ret) + return ret; + + nve0_graph_init_obj418880(priv); + nve0_graph_init_regs(priv); + nve0_graph_init_gpc_0(priv); + + nv_wr32(priv, 0x400500, 0x00010001); + nv_wr32(priv, 0x400100, 0xffffffff); + nv_wr32(priv, 0x40013c, 0xffffffff); + + nve0_graph_init_units(priv); + nve0_graph_init_gpc_1(priv); + nve0_graph_init_rop(priv); + + nv_wr32(priv, 0x400108, 0xffffffff); + nv_wr32(priv, 0x400138, 0xffffffff); + nv_wr32(priv, 0x400118, 0xffffffff); + nv_wr32(priv, 0x400130, 0xffffffff); + nv_wr32(priv, 0x40011c, 0xffffffff); + nv_wr32(priv, 0x400134, 0xffffffff); + nv_wr32(priv, 0x400054, 0x34ce3464); + + ret = nve0_graph_init_ctxctl(priv); + if (ret) + return ret; + + 
return 0; +} + +struct nouveau_oclass +nve0_graph_oclass = { + .handle = NV_ENGINE(GR, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_graph_ctor, + .dtor = nvc0_graph_dtor, + .init = nve0_graph_init, + .fini = _nouveau_graph_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h new file mode 100644 index 0000000..fde8e24 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h @@ -0,0 +1,274 @@ +#ifndef __NOUVEAU_GRAPH_REGS_H__ +#define __NOUVEAU_GRAPH_REGS_H__ + +#define NV04_PGRAPH_DEBUG_0 0x00400080 +#define NV04_PGRAPH_DEBUG_1 0x00400084 +#define NV04_PGRAPH_DEBUG_2 0x00400088 +#define NV04_PGRAPH_DEBUG_3 0x0040008c +#define NV10_PGRAPH_DEBUG_4 0x00400090 +#define NV03_PGRAPH_INTR 0x00400100 +#define NV03_PGRAPH_NSTATUS 0x00400104 +# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) +# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) +# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) +# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) +# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) +# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) +# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) +# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) +#define NV03_PGRAPH_NSOURCE 0x00400108 +# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0) +# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1) +# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2) +# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3) +# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4) +# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5) +# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6) +# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7) +# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8) +# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9) +# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) +# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) +# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) +# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) +# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) +# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) +# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) +#define NV03_PGRAPH_INTR_EN 0x00400140 +#define NV40_PGRAPH_INTR_EN 0x0040013C +# define NV_PGRAPH_INTR_NOTIFY (1<<0) +# define NV_PGRAPH_INTR_MISSING_HW (1<<4) +# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) +# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) +# define NV_PGRAPH_INTR_ERROR (1<<20) +#define NV10_PGRAPH_CTX_CONTROL 0x00400144 +#define NV10_PGRAPH_CTX_USER 0x00400148 +#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i)) +#define NV04_PGRAPH_CTX_SWITCH1 0x00400160 +#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \ + + 0x4*(i) + 0x20*(j)) +#define NV04_PGRAPH_CTX_SWITCH2 0x00400164 +#define NV04_PGRAPH_CTX_SWITCH3 0x00400168 +#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C +#define NV04_PGRAPH_CTX_CONTROL 0x00400170 +#define NV04_PGRAPH_CTX_USER 0x00400174 +#define NV04_PGRAPH_CTX_CACHE1 0x00400180 +#define NV03_PGRAPH_CTX_CONTROL 0x00400190 +#define NV03_PGRAPH_CTX_USER 0x00400194 +#define NV04_PGRAPH_CTX_CACHE2 0x004001A0 +#define NV04_PGRAPH_CTX_CACHE3 0x004001C0 +#define NV04_PGRAPH_CTX_CACHE4 0x004001E0 +#define NV40_PGRAPH_CTXCTL_0304 0x00400304 +#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 +#define 
NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff +#define NV40_PGRAPH_CTXCTL_0310 0x00400310 +#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 +#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 +#define NV40_PGRAPH_CTXCTL_030C 0x0040030c +#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 +#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 +#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 +#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF +#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330 +#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff +#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000 +#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff +#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330 +#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff +#define NV03_PGRAPH_ABS_X_RAM 0x00400400 +#define NV03_PGRAPH_ABS_Y_RAM 0x00400480 +#define NV03_PGRAPH_X_MISC 0x00400500 +#define NV03_PGRAPH_Y_MISC 0x00400504 +#define NV04_PGRAPH_VALID1 0x00400508 +#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C +#define NV04_PGRAPH_MISC24_0 0x00400510 +#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514 +#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518 +#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C +#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520 +#define NV03_PGRAPH_CLIPX_0 0x00400524 +#define NV03_PGRAPH_CLIPX_1 0x00400528 +#define NV03_PGRAPH_CLIPY_0 0x0040052C +#define NV03_PGRAPH_CLIPY_1 0x00400530 +#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534 +#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538 +#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C +#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540 +#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544 +#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548 +#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 +#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 +#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 +#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C +#define NV04_PGRAPH_MISC24_1 0x00400570 +#define NV04_PGRAPH_MISC24_2 0x00400574 +#define NV04_PGRAPH_VALID2 0x00400578 +#define NV04_PGRAPH_PASSTHRU_0 0x0040057C +#define NV04_PGRAPH_PASSTHRU_1 0x00400580 +#define NV04_PGRAPH_PASSTHRU_2 0x00400584 +#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588 +#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C +#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590 +#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594 +#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598 +#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C +#define NV04_PGRAPH_FORMAT_0 0x004005A8 +#define NV04_PGRAPH_FORMAT_1 0x004005AC +#define NV04_PGRAPH_FILTER_0 0x004005B0 +#define NV04_PGRAPH_FILTER_1 0x004005B4 +#define NV03_PGRAPH_MONO_COLOR0 0x00400600 +#define NV04_PGRAPH_ROP3 0x00400604 +#define NV04_PGRAPH_BETA_AND 0x00400608 +#define NV04_PGRAPH_BETA_PREMULT 0x0040060C +#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610 +#define NV04_PGRAPH_FORMATS 0x00400618 +#define NV10_PGRAPH_DEBUG_2 0x00400620 +#define NV04_PGRAPH_BOFFSET0 0x00400640 +#define NV04_PGRAPH_BOFFSET1 0x00400644 +#define NV04_PGRAPH_BOFFSET2 0x00400648 +#define NV04_PGRAPH_BOFFSET3 0x0040064C +#define NV04_PGRAPH_BOFFSET4 0x00400650 +#define NV04_PGRAPH_BOFFSET5 0x00400654 +#define NV04_PGRAPH_BBASE0 0x00400658 +#define NV04_PGRAPH_BBASE1 0x0040065C +#define NV04_PGRAPH_BBASE2 0x00400660 +#define NV04_PGRAPH_BBASE3 0x00400664 +#define NV04_PGRAPH_BBASE4 0x00400668 +#define NV04_PGRAPH_BBASE5 0x0040066C +#define NV04_PGRAPH_BPITCH0 0x00400670 +#define NV04_PGRAPH_BPITCH1 0x00400674 +#define 
NV04_PGRAPH_BPITCH2 0x00400678 +#define NV04_PGRAPH_BPITCH3 0x0040067C +#define NV04_PGRAPH_BPITCH4 0x00400680 +#define NV04_PGRAPH_BLIMIT0 0x00400684 +#define NV04_PGRAPH_BLIMIT1 0x00400688 +#define NV04_PGRAPH_BLIMIT2 0x0040068C +#define NV04_PGRAPH_BLIMIT3 0x00400690 +#define NV04_PGRAPH_BLIMIT4 0x00400694 +#define NV04_PGRAPH_BLIMIT5 0x00400698 +#define NV04_PGRAPH_BSWIZZLE2 0x0040069C +#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 +#define NV03_PGRAPH_STATUS 0x004006B0 +#define NV04_PGRAPH_STATUS 0x00400700 +# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000 +#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 +#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 +#define NV04_PGRAPH_SURFACE 0x0040070C +#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C +#define NV04_PGRAPH_STATE 0x00400710 +#define NV10_PGRAPH_SURFACE 0x00400710 +#define NV04_PGRAPH_NOTIFY 0x00400714 +#define NV10_PGRAPH_STATE 0x00400714 +#define NV10_PGRAPH_NOTIFY 0x00400718 + +#define NV04_PGRAPH_FIFO 0x00400720 + +#define NV04_PGRAPH_BPIXEL 0x00400724 +#define NV10_PGRAPH_RDI_INDEX 0x00400750 +#define NV04_PGRAPH_FFINTFC_ST2 0x00400754 +#define NV10_PGRAPH_RDI_DATA 0x00400754 +#define NV04_PGRAPH_DMA_PITCH 0x00400760 +#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760 +#define NV04_PGRAPH_DVD_COLORFMT 0x00400764 +#define NV10_PGRAPH_FFINTFC_ST2 0x00400764 +#define NV04_PGRAPH_SCALED_FORMAT 0x00400768 +#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768 +#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c +#define NV10_PGRAPH_DMA_PITCH 0x00400770 +#define NV10_PGRAPH_DVD_COLORFMT 0x00400774 +#define NV10_PGRAPH_SCALED_FORMAT 0x00400778 +#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 +#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 +#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 +#define NV04_PGRAPH_PATT_COLOR0 0x00400800 +#define NV04_PGRAPH_PATT_COLOR1 0x00400804 +#define NV04_PGRAPH_PATTERN 0x00400808 +#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810 +#define NV04_PGRAPH_CHROMA 0x00400814 +#define NV04_PGRAPH_CONTROL0 0x00400818 +#define NV04_PGRAPH_CONTROL1 0x0040081C +#define NV04_PGRAPH_CONTROL2 0x00400820 +#define NV04_PGRAPH_BLEND 0x00400824 +#define NV04_PGRAPH_STORED_FMT 0x00400830 +#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 +#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16)) +#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) +#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) +#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) +#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) +#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i)) +#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) +#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) +#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) +#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) +#define NV04_PGRAPH_U_RAM 0x00400D00 +#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16)) +#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16)) +#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16)) +#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) +#define NV04_PGRAPH_V_RAM 0x00400D40 +#define NV04_PGRAPH_W_RAM 0x00400D80 +#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i)) +#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 +#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 +#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 +#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C +#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 +#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 +#define 
NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 +#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C +#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 +#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 +#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 +#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C +#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 +#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 +#define NV10_PGRAPH_XFMODE0 0x00400F40 +#define NV10_PGRAPH_XFMODE1 0x00400F44 +#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48 +#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C +#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50 +#define NV10_PGRAPH_PIPE_DATA 0x00400F54 +#define NV04_PGRAPH_DMA_START_0 0x00401000 +#define NV04_PGRAPH_DMA_START_1 0x00401004 +#define NV04_PGRAPH_DMA_LENGTH 0x00401008 +#define NV04_PGRAPH_DMA_MISC 0x0040100C +#define NV04_PGRAPH_DMA_DATA_0 0x00401020 +#define NV04_PGRAPH_DMA_DATA_1 0x00401024 +#define NV04_PGRAPH_DMA_RM 0x00401030 +#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040 +#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044 +#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048 +#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C +#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050 +#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054 +#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058 +#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C +#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060 +#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080 +#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084 +#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088 +#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C +#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090 +#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094 +#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 +#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C +#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 +#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i)) +#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) +#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) +#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) +#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) +#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i)) +#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i)) + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c new file mode 100644 index 0000000..49ecbb8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c @@ -0,0 +1,311 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/handle.h> + +#include <subdev/fb.h> +#include <subdev/timer.h> +#include <subdev/instmem.h> + +#include <engine/fifo.h> +#include <engine/mpeg.h> +#include <engine/graph/nv40.h> + +struct nv31_mpeg_priv { + struct nouveau_mpeg base; + atomic_t refcount; +}; + +struct nv31_mpeg_chan { + struct nouveau_object base; +}; + +/******************************************************************************* + * MPEG object classes + ******************************************************************************/ + +static int +nv31_mpeg_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj *obj; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, + 20, 16, 0, &obj); + *pobject = nv_object(obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, nv_mclass(obj)); + nv_wo32(obj, 0x04, 0x00000000); + nv_wo32(obj, 0x08, 0x00000000); + nv_wo32(obj, 0x0c, 0x00000000); + return 0; +} + +static int +nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len) +{ + struct nouveau_instmem *imem = nouveau_instmem(object); + struct nv31_mpeg_priv *priv = (void *)object->engine; + u32 inst = *(u32 *)arg << 4; + u32 dma0 = nv_ro32(imem, inst + 0); + u32 dma1 = nv_ro32(imem, inst + 4); + u32 dma2 = nv_ro32(imem, inst + 8); + u32 base = (dma2 & 0xfffff000) | (dma0 >> 20); + u32 size = dma1 + 1; + + /* only allow linear DMA objects */ + if (!(dma0 & 0x00002000)) + return -EINVAL; + + if (mthd == 0x0190) { + /* DMA_CMD */ + nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000)); + nv_wr32(priv, 0x00b334, base); + nv_wr32(priv, 0x00b324, size); + } else + if (mthd == 0x01a0) { + /* DMA_DATA */ + nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2); + nv_wr32(priv, 0x00b360, base); + nv_wr32(priv, 0x00b364, size); + } else { + /* DMA_IMAGE, VRAM only */ + if (dma0 & 0x000c0000) + return -EINVAL; + + nv_wr32(priv, 0x00b370, base); + nv_wr32(priv, 0x00b374, size); + } + + return 0; +} + +static struct nouveau_ofuncs +nv31_mpeg_ofuncs = { + .ctor = nv31_mpeg_object_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, +}; + +static struct nouveau_omthds +nv31_mpeg_omthds[] = { + { 0x0190, 0x0190, nv31_mpeg_mthd_dma }, + { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma }, + { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma }, + {} +}; + +struct nouveau_oclass +nv31_mpeg_sclass[] = { + { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds }, + {} +}; + +/******************************************************************************* + * PMPEG context + ******************************************************************************/ + +static int +nv31_mpeg_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv31_mpeg_priv *priv = (void *)engine; + struct nv31_mpeg_chan *chan; + int ret; + + if (!atomic_add_unless(&priv->refcount, 1, 1)) + return -EBUSY; + + ret = nouveau_object_create(parent, engine, oclass, 0, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + return 0; +} + +static void +nv31_mpeg_context_dtor(struct nouveau_object *object) +{ + struct nv31_mpeg_priv 
*priv = (void *)object->engine; + struct nv31_mpeg_chan *chan = (void *)object; + atomic_dec(&priv->refcount); + nouveau_object_destroy(&chan->base); +} + +static struct nouveau_oclass +nv31_mpeg_cclass = { + .handle = NV_ENGCTX(MPEG, 0x31), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv31_mpeg_context_ctor, + .dtor = nv31_mpeg_context_dtor, + .init = nouveau_object_init, + .fini = nouveau_object_fini, + }, +}; + +/******************************************************************************* + * PMPEG engine/subdev functions + ******************************************************************************/ + +void +nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i) +{ + struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i]; + struct nv31_mpeg_priv *priv = (void *)engine; + + nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch); + nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit); + nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr); +} + +void +nv31_mpeg_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(subdev); + struct nouveau_engine *engine = nv_engine(subdev); + struct nouveau_object *engctx; + struct nouveau_handle *handle; + struct nv31_mpeg_priv *priv = (void *)subdev; + u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff; + u32 stat = nv_rd32(priv, 0x00b100); + u32 type = nv_rd32(priv, 0x00b230); + u32 mthd = nv_rd32(priv, 0x00b234); + u32 data = nv_rd32(priv, 0x00b238); + u32 show = stat; + int chid; + + engctx = nouveau_engctx_get(engine, inst); + chid = pfifo->chid(pfifo, engctx); + + if (stat & 0x01000000) { + /* happens on initial binding of the object */ + if (type == 0x00000020 && mthd == 0x0000) { + nv_mask(priv, 0x00b308, 0x00000000, 0x00000000); + show &= ~0x01000000; + } + + if (type == 0x00000010) { + handle = nouveau_handle_get_class(engctx, 0x3174); + if (handle && !nv_call(handle->object, mthd, data)) + show &= ~0x01000000; + nouveau_handle_put(handle); + } + } + + nv_wr32(priv, 0x00b100, stat); + nv_wr32(priv, 0x00b230, 0x00000001); + + if (show) { + nv_error(priv, + "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n", + chid, inst << 4, nouveau_client_name(engctx), stat, + type, mthd, data); + } + + nouveau_engctx_put(engctx); +} + +static int +nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv31_mpeg_priv *priv; + int ret; + + ret = nouveau_mpeg_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000002; + nv_subdev(priv)->intr = nv31_mpeg_intr; + nv_engine(priv)->cclass = &nv31_mpeg_cclass; + nv_engine(priv)->sclass = nv31_mpeg_sclass; + nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog; + return 0; +} + +int +nv31_mpeg_init(struct nouveau_object *object) +{ + struct nouveau_engine *engine = nv_engine(object->engine); + struct nv31_mpeg_priv *priv = (void *)engine; + struct nouveau_fb *pfb = nouveau_fb(object); + int ret, i; + + ret = nouveau_mpeg_init(&priv->base); + if (ret) + return ret; + + /* VPE init */ + nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ + nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */ + + for (i = 0; i < pfb->tile.regions; i++) + engine->tile_prog(engine, i); + + /* PMPEG init */ + nv_wr32(priv, 0x00b32c, 0x00000000); + nv_wr32(priv, 0x00b314, 0x00000100); + nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 
0x00000044 : 0x00000031); + nv_wr32(priv, 0x00b300, 0x02001ec1); + nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); + + nv_wr32(priv, 0x00b100, 0xffffffff); + nv_wr32(priv, 0x00b140, 0xffffffff); + + if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) { + nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200)); + return -EBUSY; + } + + return 0; +} + +struct nouveau_oclass +nv31_mpeg_oclass = { + .handle = NV_ENGINE(MPEG, 0x31), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv31_mpeg_ctor, + .dtor = _nouveau_mpeg_dtor, + .init = nv31_mpeg_init, + .fini = _nouveau_mpeg_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c new file mode 100644 index 0000000..f7c581a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c @@ -0,0 +1,144 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> + +#include <subdev/fb.h> +#include <subdev/timer.h> +#include <subdev/instmem.h> + +#include <engine/mpeg.h> +#include <engine/graph/nv40.h> + +struct nv40_mpeg_priv { + struct nouveau_mpeg base; +}; + +struct nv40_mpeg_chan { + struct nouveau_mpeg_chan base; +}; + +/******************************************************************************* + * PMPEG context + ******************************************************************************/ + +static int +nv40_mpeg_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_mpeg_chan *chan; + int ret; + + ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, + 264 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + return 0; +} + +static int +nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend) +{ + + struct nv40_mpeg_priv *priv = (void *)object->engine; + struct nv40_mpeg_chan *chan = (void *)object; + u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4; + + nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000); + if (nv_rd32(priv, 0x00b318) == inst) + nv_mask(priv, 0x00b318, 0x80000000, 0x00000000); + nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); + return 0; +} + +static struct nouveau_oclass +nv40_mpeg_cclass = { + .handle = NV_ENGCTX(MPEG, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_mpeg_context_ctor, + .dtor = _nouveau_mpeg_context_dtor, + .init = _nouveau_mpeg_context_init, + .fini = nv40_mpeg_context_fini, + .rd32 = _nouveau_mpeg_context_rd32, + .wr32 = _nouveau_mpeg_context_wr32, + }, +}; + +/******************************************************************************* + * PMPEG engine/subdev functions + ******************************************************************************/ + +static void +nv40_mpeg_intr(struct nouveau_subdev *subdev) +{ + struct nv40_mpeg_priv *priv = (void *)subdev; + u32 stat; + + if ((stat = nv_rd32(priv, 0x00b100))) + nv31_mpeg_intr(subdev); + + if ((stat = nv_rd32(priv, 0x00b800))) { + nv_error(priv, "PMSRCH 0x%08x\n", stat); + nv_wr32(priv, 0x00b800, stat); + } +} + +static int +nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_mpeg_priv *priv; + int ret; + + ret = nouveau_mpeg_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000002; + nv_subdev(priv)->intr = nv40_mpeg_intr; + nv_engine(priv)->cclass = &nv40_mpeg_cclass; + nv_engine(priv)->sclass = nv31_mpeg_sclass; + nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog; + return 0; +} + +struct nouveau_oclass +nv40_mpeg_oclass = { + .handle = NV_ENGINE(MPEG, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_mpeg_ctor, + .dtor = _nouveau_mpeg_dtor, + .init = nv31_mpeg_init, + .fini = _nouveau_mpeg_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c new file mode 100644 index 0000000..bc7d12b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c @@ -0,0 +1,239 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> + +#include <subdev/vm.h> +#include <subdev/bar.h> +#include <subdev/timer.h> + +#include <engine/mpeg.h> + +struct nv50_mpeg_priv { + struct nouveau_mpeg base; +}; + +struct nv50_mpeg_chan { + struct nouveau_mpeg_chan base; +}; + +/******************************************************************************* + * MPEG object classes + ******************************************************************************/ + +static int +nv50_mpeg_object_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_gpuobj *obj; + int ret; + + ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent, + 16, 16, 0, &obj); + *pobject = nv_object(obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, nv_mclass(obj)); + nv_wo32(obj, 0x04, 0x00000000); + nv_wo32(obj, 0x08, 0x00000000); + nv_wo32(obj, 0x0c, 0x00000000); + return 0; +} + +struct nouveau_ofuncs +nv50_mpeg_ofuncs = { + .ctor = nv50_mpeg_object_ctor, + .dtor = _nouveau_gpuobj_dtor, + .init = _nouveau_gpuobj_init, + .fini = _nouveau_gpuobj_fini, + .rd32 = _nouveau_gpuobj_rd32, + .wr32 = _nouveau_gpuobj_wr32, +}; + +static struct nouveau_oclass +nv50_mpeg_sclass[] = { + { 0x3174, &nv50_mpeg_ofuncs }, + {} +}; + +/******************************************************************************* + * PMPEG context + ******************************************************************************/ + +int +nv50_mpeg_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = nouveau_bar(parent); + struct nv50_mpeg_chan *chan; + int ret; + + ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4, + 0, NVOBJ_FLAG_ZERO_ALLOC, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + nv_wo32(chan, 0x0070, 0x00801ec1); + nv_wo32(chan, 0x007c, 0x0000037c); + bar->flush(bar); + return 0; +} + +static struct nouveau_oclass +nv50_mpeg_cclass = { + .handle = NV_ENGCTX(MPEG, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_mpeg_context_ctor, + .dtor = _nouveau_mpeg_context_dtor, + .init = _nouveau_mpeg_context_init, + .fini = _nouveau_mpeg_context_fini, + .rd32 = _nouveau_mpeg_context_rd32, 
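+		/* rd32/wr32 forward to the backing context image, the same
+		 * object whose words 0x70/0x7c are seeded by the ctor above. */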
+ .wr32 = _nouveau_mpeg_context_wr32, + }, +}; + +/******************************************************************************* + * PMPEG engine/subdev functions + ******************************************************************************/ + +int +nv50_mpeg_tlb_flush(struct nouveau_engine *engine) +{ + nv50_vm_flush_engine(&engine->base, 0x08); + return 0; +} + +void +nv50_mpeg_intr(struct nouveau_subdev *subdev) +{ + struct nv50_mpeg_priv *priv = (void *)subdev; + u32 stat = nv_rd32(priv, 0x00b100); + u32 type = nv_rd32(priv, 0x00b230); + u32 mthd = nv_rd32(priv, 0x00b234); + u32 data = nv_rd32(priv, 0x00b238); + u32 show = stat; + + if (stat & 0x01000000) { + /* happens on initial binding of the object */ + if (type == 0x00000020 && mthd == 0x0000) { + nv_wr32(priv, 0x00b308, 0x00000100); + show &= ~0x01000000; + } + } + + if (show) { + nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n", + stat, type, mthd, data); + } + + nv_wr32(priv, 0x00b100, stat); + nv_wr32(priv, 0x00b230, 0x00000001); +} + +static void +nv50_vpe_intr(struct nouveau_subdev *subdev) +{ + struct nv50_mpeg_priv *priv = (void *)subdev; + + if (nv_rd32(priv, 0x00b100)) + nv50_mpeg_intr(subdev); + + if (nv_rd32(priv, 0x00b800)) { + u32 stat = nv_rd32(priv, 0x00b800); + nv_info(priv, "PMSRCH: 0x%08x\n", stat); + nv_wr32(priv, 0xb800, stat); + } +} + +static int +nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_mpeg_priv *priv; + int ret; + + ret = nouveau_mpeg_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00400002; + nv_subdev(priv)->intr = nv50_vpe_intr; + nv_engine(priv)->cclass = &nv50_mpeg_cclass; + nv_engine(priv)->sclass = nv50_mpeg_sclass; + nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush; + return 0; +} + +int +nv50_mpeg_init(struct nouveau_object *object) +{ + struct nv50_mpeg_priv *priv = (void *)object; + int ret; + + ret = nouveau_mpeg_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x00b32c, 0x00000000); + nv_wr32(priv, 0x00b314, 0x00000100); + nv_wr32(priv, 0x00b0e0, 0x0000001a); + + nv_wr32(priv, 0x00b220, 0x00000044); + nv_wr32(priv, 0x00b300, 0x00801ec1); + nv_wr32(priv, 0x00b390, 0x00000000); + nv_wr32(priv, 0x00b394, 0x00000000); + nv_wr32(priv, 0x00b398, 0x00000000); + nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001); + + nv_wr32(priv, 0x00b100, 0xffffffff); + nv_wr32(priv, 0x00b140, 0xffffffff); + + if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) { + nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200)); + return -EBUSY; + } + + return 0; +} + +struct nouveau_oclass +nv50_mpeg_oclass = { + .handle = NV_ENGINE(MPEG, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_mpeg_ctor, + .dtor = _nouveau_mpeg_dtor, + .init = nv50_mpeg_init, + .fini = _nouveau_mpeg_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c new file mode 100644 index 0000000..8f805b4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c @@ -0,0 +1,104 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> + +#include <subdev/vm.h> +#include <subdev/bar.h> +#include <subdev/timer.h> + +#include <engine/mpeg.h> + +struct nv84_mpeg_priv { + struct nouveau_mpeg base; +}; + +struct nv84_mpeg_chan { + struct nouveau_mpeg_chan base; +}; + +/******************************************************************************* + * MPEG object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv84_mpeg_sclass[] = { + { 0x8274, &nv50_mpeg_ofuncs }, + {} +}; + +/******************************************************************************* + * PMPEG context + ******************************************************************************/ + +static struct nouveau_oclass +nv84_mpeg_cclass = { + .handle = NV_ENGCTX(MPEG, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_mpeg_context_ctor, + .dtor = _nouveau_mpeg_context_dtor, + .init = _nouveau_mpeg_context_init, + .fini = _nouveau_mpeg_context_fini, + .rd32 = _nouveau_mpeg_context_rd32, + .wr32 = _nouveau_mpeg_context_wr32, + }, +}; + +/******************************************************************************* + * PMPEG engine/subdev functions + ******************************************************************************/ + +static int +nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_mpeg_priv *priv; + int ret; + + ret = nouveau_mpeg_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000002; + nv_subdev(priv)->intr = nv50_mpeg_intr; + nv_engine(priv)->cclass = &nv84_mpeg_cclass; + nv_engine(priv)->sclass = nv84_mpeg_sclass; + nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush; + return 0; +} + +struct nouveau_oclass +nv84_mpeg_oclass = { + .handle = NV_ENGINE(MPEG, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_mpeg_ctor, + .dtor = _nouveau_mpeg_dtor, + .init = nv50_mpeg_init, + .fini = _nouveau_mpeg_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c new file mode 100644 index 0000000..5a5b2a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c @@ -0,0 +1,98 @@ +/* + * Copyright 2012 
Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/engine.h> +#include <core/engctx.h> +#include <core/class.h> + +#include <engine/ppp.h> + +struct nv98_ppp_priv { + struct nouveau_engine base; +}; + +struct nv98_ppp_chan { + struct nouveau_engctx base; +}; + +/******************************************************************************* + * PPP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv98_ppp_sclass[] = { + {}, +}; + +/******************************************************************************* + * PPPP context + ******************************************************************************/ + +static struct nouveau_oclass +nv98_ppp_cclass = { + .handle = NV_ENGCTX(PPP, 0x98), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_engctx_ctor, + .dtor = _nouveau_engctx_dtor, + .init = _nouveau_engctx_init, + .fini = _nouveau_engctx_fini, + .rd32 = _nouveau_engctx_rd32, + .wr32 = _nouveau_engctx_wr32, + }, +}; + +/******************************************************************************* + * PPPP engine/subdev functions + ******************************************************************************/ + +static int +nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_ppp_priv *priv; + int ret; + + ret = nouveau_engine_create(parent, engine, oclass, true, + "PPPP", "ppp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00400002; + nv_engine(priv)->cclass = &nv98_ppp_cclass; + nv_engine(priv)->sclass = nv98_ppp_sclass; + return 0; +} + +struct nouveau_oclass +nv98_ppp_oclass = { + .handle = NV_ENGINE(PPP, 0x98), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv98_ppp_ctor, + .dtor = _nouveau_engine_dtor, + .init = _nouveau_engine_init, + .fini = _nouveau_engine_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c new file mode 100644 index 0000000..ebf0d86 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c @@ -0,0 +1,110 @@ +/* + * Copyright 2012 Maarten Lankhorst + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the 
Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Maarten Lankhorst + */ + +#include <core/falcon.h> + +#include <engine/ppp.h> + +struct nvc0_ppp_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * PPP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_ppp_sclass[] = { + { 0x90b3, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PPPP context + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_ppp_cclass = { + .handle = NV_ENGCTX(PPP, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + }, +}; + +/******************************************************************************* + * PPPP engine/subdev functions + ******************************************************************************/ + +static int +nvc0_ppp_init(struct nouveau_object *object) +{ + struct nvc0_ppp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x086010, 0x0000fff2); + nv_wr32(priv, 0x08601c, 0x0000fff2); + return 0; +} + +static int +nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_ppp_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true, + "PPPP", "ppp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00000002; + nv_engine(priv)->cclass = &nvc0_ppp_cclass; + nv_engine(priv)->sclass = nvc0_ppp_sclass; + return 0; +} + +struct nouveau_oclass +nvc0_ppp_oclass = { + .handle = NV_ENGINE(PPP, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_ppp_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nvc0_ppp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c new file mode 100644 index 0000000..2a859a3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c @@ -0,0 +1,147 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> + +#include <engine/software.h> +#include <engine/fifo.h> + +struct nv04_software_priv { + struct nouveau_software base; +}; + +struct nv04_software_chan { + struct nouveau_software_chan base; +}; + +/******************************************************************************* + * software object classes + ******************************************************************************/ + +static int +nv04_software_set_ref(struct nouveau_object *object, u32 mthd, + void *data, u32 size) +{ + struct nouveau_object *channel = (void *)nv_engctx(object->parent); + struct nouveau_fifo_chan *fifo = (void *)channel->parent; + atomic_set(&fifo->refcnt, *(u32*)data); + return 0; +} + +static int +nv04_software_flip(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv04_software_chan *chan = (void *)nv_engctx(object->parent); + if (chan->base.flip) + return chan->base.flip(chan->base.flip_data); + return -EINVAL; +} + +static struct nouveau_omthds +nv04_software_omthds[] = { + { 0x0150, 0x0150, nv04_software_set_ref }, + { 0x0500, 0x0500, nv04_software_flip }, + {} +}; + +static struct nouveau_oclass +nv04_software_sclass[] = { + { 0x006e, &nouveau_object_ofuncs, nv04_software_omthds }, + {} +}; + +/******************************************************************************* + * software context + ******************************************************************************/ + +static int +nv04_software_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_software_chan *chan; + int ret; + + ret = nouveau_software_context_create(parent, engine, oclass, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + return 0; +} + +static struct nouveau_oclass +nv04_software_cclass = { + .handle = NV_ENGCTX(SW, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_software_context_ctor, + .dtor = _nouveau_software_context_dtor, + .init = _nouveau_software_context_init, + .fini = _nouveau_software_context_fini, + }, +}; + +/******************************************************************************* + * software engine/subdev functions + ******************************************************************************/ + +void 
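+/* The software "engine" has no hardware of its own; its interrupt handler
+ * below simply clears the pending software-interrupt bit (bit 31) in the
+ * top-level PMC interrupt status register (0x000100). */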
+nv04_software_intr(struct nouveau_subdev *subdev) +{ + nv_mask(subdev, 0x000100, 0x80000000, 0x00000000); +} + +static int +nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_software_priv *priv; + int ret; + + ret = nouveau_software_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->cclass = &nv04_software_cclass; + nv_engine(priv)->sclass = nv04_software_sclass; + nv_subdev(priv)->intr = nv04_software_intr; + return 0; +} + +struct nouveau_oclass +nv04_software_oclass = { + .handle = NV_ENGINE(SW, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_software_ctor, + .dtor = _nouveau_software_dtor, + .init = _nouveau_software_init, + .fini = _nouveau_software_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c new file mode 100644 index 0000000..a019364 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c @@ -0,0 +1,129 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> + +#include <engine/software.h> + +struct nv10_software_priv { + struct nouveau_software base; +}; + +struct nv10_software_chan { + struct nouveau_software_chan base; +}; + +/******************************************************************************* + * software object classes + ******************************************************************************/ + +static int +nv10_software_flip(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv10_software_chan *chan = (void *)nv_engctx(object->parent); + if (chan->base.flip) + return chan->base.flip(chan->base.flip_data); + return -EINVAL; +} + +static struct nouveau_omthds +nv10_software_omthds[] = { + { 0x0500, 0x0500, nv10_software_flip }, + {} +}; + +static struct nouveau_oclass +nv10_software_sclass[] = { + { 0x016e, &nouveau_object_ofuncs, nv10_software_omthds }, + {} +}; + +/******************************************************************************* + * software context + ******************************************************************************/ + +static int +nv10_software_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv10_software_chan *chan; + int ret; + + ret = nouveau_software_context_create(parent, engine, oclass, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + return 0; +} + +static struct nouveau_oclass +nv10_software_cclass = { + .handle = NV_ENGCTX(SW, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_software_context_ctor, + .dtor = _nouveau_software_context_dtor, + .init = _nouveau_software_context_init, + .fini = _nouveau_software_context_fini, + }, +}; + +/******************************************************************************* + * software engine/subdev functions + ******************************************************************************/ + +static int +nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv10_software_priv *priv; + int ret; + + ret = nouveau_software_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->cclass = &nv10_software_cclass; + nv_engine(priv)->sclass = nv10_software_sclass; + nv_subdev(priv)->intr = nv04_software_intr; + return 0; +} + +struct nouveau_oclass +nv10_software_oclass = { + .handle = NV_ENGINE(SW, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_software_ctor, + .dtor = _nouveau_software_dtor, + .init = _nouveau_software_init, + .fini = _nouveau_software_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c new file mode 100644 index 0000000..c48e749 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c @@ -0,0 +1,219 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/namedb.h> +#include <core/handle.h> +#include <core/gpuobj.h> +#include <core/event.h> + +#include <subdev/bar.h> + +#include <engine/software.h> +#include <engine/disp.h> + +struct nv50_software_priv { + struct nouveau_software base; +}; + +struct nv50_software_chan { + struct nouveau_software_chan base; +}; + +/******************************************************************************* + * software object classes + ******************************************************************************/ + +static int +nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); + struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent; + struct nouveau_handle *handle; + int ret = -EINVAL; + + handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args); + if (!handle) + return -ENOENT; + + if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) { + struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object); + chan->base.vblank.ctxdma = gpuobj->node->offset >> 4; + ret = 0; + } + nouveau_namedb_put(handle); + return ret; +} + +static int +nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); + chan->base.vblank.offset = *(u32 *)args; + return 0; +} + +static int +nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); + chan->base.vblank.value = *(u32 *)args; + return 0; +} + +static int +nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); + struct nouveau_disp *disp = nouveau_disp(object); + u32 crtc = *(u32 *)args; + if (crtc > 1) + return -EINVAL; + + nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event); + return 0; +} + +static int +nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); + if (chan->base.flip) + return chan->base.flip(chan->base.flip_data); + return -EINVAL; +} + +static struct nouveau_omthds 
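+/* Method table for the 0x506e software class: bind the DMA object used as
+ * the vblank semaphore (0x018c), program its offset (0x0400) and release
+ * value (0x0404), arm the release on a CRTC's next vblank (0x0408), and
+ * request a page flip (0x0500). */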
+nv50_software_omthds[] = { + { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem }, + { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset }, + { 0x0404, 0x0404, nv50_software_mthd_vblsem_value }, + { 0x0408, 0x0408, nv50_software_mthd_vblsem_release }, + { 0x0500, 0x0500, nv50_software_mthd_flip }, + {} +}; + +static struct nouveau_oclass +nv50_software_sclass[] = { + { 0x506e, &nouveau_object_ofuncs, nv50_software_omthds }, + {} +}; + +/******************************************************************************* + * software context + ******************************************************************************/ + +static int +nv50_software_vblsem_release(struct nouveau_eventh *event, int head) +{ + struct nouveau_software_chan *chan = + container_of(event, struct nouveau_software_chan, vblank.event); + struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; + struct nouveau_bar *bar = nouveau_bar(priv); + + nv_wr32(priv, 0x001704, chan->vblank.channel); + nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); + bar->flush(bar); + + if (nv_device(priv)->chipset == 0x50) { + nv_wr32(priv, 0x001570, chan->vblank.offset); + nv_wr32(priv, 0x001574, chan->vblank.value); + } else { + nv_wr32(priv, 0x060010, chan->vblank.offset); + nv_wr32(priv, 0x060014, chan->vblank.value); + } + + return NVKM_EVENT_DROP; +} + +static int +nv50_software_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_software_chan *chan; + int ret; + + ret = nouveau_software_context_create(parent, engine, oclass, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12; + chan->base.vblank.event.func = nv50_software_vblsem_release; + return 0; +} + +static struct nouveau_oclass +nv50_software_cclass = { + .handle = NV_ENGCTX(SW, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_software_context_ctor, + .dtor = _nouveau_software_context_dtor, + .init = _nouveau_software_context_init, + .fini = _nouveau_software_context_fini, + }, +}; + +/******************************************************************************* + * software engine/subdev functions + ******************************************************************************/ + +static int +nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_software_priv *priv; + int ret; + + ret = nouveau_software_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->cclass = &nv50_software_cclass; + nv_engine(priv)->sclass = nv50_software_sclass; + nv_subdev(priv)->intr = nv04_software_intr; + return 0; +} + +struct nouveau_oclass +nv50_software_oclass = { + .handle = NV_ENGINE(SW, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_software_ctor, + .dtor = _nouveau_software_dtor, + .init = _nouveau_software_init, + .fini = _nouveau_software_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c new file mode 100644 index 0000000..d698e71 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c @@ -0,0 +1,225 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/os.h> +#include <core/class.h> +#include <core/engctx.h> +#include <core/event.h> + +#include <subdev/bar.h> + +#include <engine/software.h> +#include <engine/disp.h> + +struct nvc0_software_priv { + struct nouveau_software base; +}; + +struct nvc0_software_chan { + struct nouveau_software_chan base; +}; + +/******************************************************************************* + * software object classes + ******************************************************************************/ + +static int +nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); + u64 data = *(u32 *)args; + if (mthd == 0x0400) { + chan->base.vblank.offset &= 0x00ffffffffULL; + chan->base.vblank.offset |= data << 32; + } else { + chan->base.vblank.offset &= 0xff00000000ULL; + chan->base.vblank.offset |= data; + } + return 0; +} + +static int +nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); + chan->base.vblank.value = *(u32 *)args; + return 0; +} + +static int +nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); + struct nouveau_disp *disp = nouveau_disp(object); + u32 crtc = *(u32 *)args; + + if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3) + return -EINVAL; + + nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event); + return 0; +} + +static int +nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); + if (chan->base.flip) + return chan->base.flip(chan->base.flip_data); + return -EINVAL; +} + +static int +nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd, + void *args, u32 size) +{ + struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); + struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine; + u32 data = *(u32 *)args; + + switch (mthd) { + case 0x600: + nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */ + break; + case 0x644: + if (data & ~0x1ffffe) + return -EINVAL; + nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */ + break; + case 
0x6ac: + nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */ + break; + default: + return -EINVAL; + } + return 0; +} + +static struct nouveau_omthds +nvc0_software_omthds[] = { + { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset }, + { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset }, + { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value }, + { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release }, + { 0x0500, 0x0500, nvc0_software_mthd_flip }, + { 0x0600, 0x0600, nvc0_software_mthd_mp_control }, + { 0x0644, 0x0644, nvc0_software_mthd_mp_control }, + { 0x06ac, 0x06ac, nvc0_software_mthd_mp_control }, + {} +}; + +static struct nouveau_oclass +nvc0_software_sclass[] = { + { 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds }, + {} +}; + +/******************************************************************************* + * software context + ******************************************************************************/ + +static int +nvc0_software_vblsem_release(struct nouveau_eventh *event, int head) +{ + struct nouveau_software_chan *chan = + container_of(event, struct nouveau_software_chan, vblank.event); + struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine; + struct nouveau_bar *bar = nouveau_bar(priv); + + nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); + bar->flush(bar); + nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset)); + nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); + nv_wr32(priv, 0x060014, chan->vblank.value); + + return NVKM_EVENT_DROP; +} + +static int +nvc0_software_context_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_software_chan *chan; + int ret; + + ret = nouveau_software_context_create(parent, engine, oclass, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12; + chan->base.vblank.event.func = nvc0_software_vblsem_release; + return 0; +} + +static struct nouveau_oclass +nvc0_software_cclass = { + .handle = NV_ENGCTX(SW, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_software_context_ctor, + .dtor = _nouveau_software_context_dtor, + .init = _nouveau_software_context_init, + .fini = _nouveau_software_context_fini, + }, +}; + +/******************************************************************************* + * software engine/subdev functions + ******************************************************************************/ + +static int +nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_software_priv *priv; + int ret; + + ret = nouveau_software_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_engine(priv)->cclass = &nvc0_software_cclass; + nv_engine(priv)->sclass = nvc0_software_sclass; + nv_subdev(priv)->intr = nv04_software_intr; + return 0; +} + +struct nouveau_oclass +nvc0_software_oclass = { + .handle = NV_ENGINE(SW, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_software_ctor, + .dtor = _nouveau_software_dtor, + .init = _nouveau_software_init, + .fini = _nouveau_software_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c new file mode 100644 index 0000000..261cd96 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c @@ -0,0 +1,93 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/engctx.h> +#include <core/class.h> + +#include <engine/vp.h> + +struct nv84_vp_priv { + struct nouveau_engine base; +}; + +/******************************************************************************* + * VP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nv84_vp_sclass[] = { + {}, +}; + +/******************************************************************************* + * PVP context + ******************************************************************************/ + +static struct nouveau_oclass +nv84_vp_cclass = { + .handle = NV_ENGCTX(VP, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_engctx_ctor, + .dtor = _nouveau_engctx_dtor, + .init = _nouveau_engctx_init, + .fini = _nouveau_engctx_fini, + .rd32 = _nouveau_engctx_rd32, + .wr32 = _nouveau_engctx_wr32, + }, +}; + +/******************************************************************************* + * PVP engine/subdev functions + ******************************************************************************/ + +static int +nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_vp_priv *priv; + int ret; + + ret = nouveau_engine_create(parent, engine, oclass, true, + "PVP", "vp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x01020000; + nv_engine(priv)->cclass = &nv84_vp_cclass; + nv_engine(priv)->sclass = nv84_vp_sclass; + return 0; +} + +struct nouveau_oclass +nv84_vp_oclass = { + .handle = NV_ENGINE(VP, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_vp_ctor, + .dtor = _nouveau_engine_dtor, + .init = _nouveau_engine_init, + .fini = _nouveau_engine_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c new file mode 100644 index 0000000..f761949 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c @@ -0,0 +1,110 @@ +/* + * Copyright 2012 Maarten Lankhorst + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without 
restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Maarten Lankhorst + */ + +#include <core/falcon.h> + +#include <engine/vp.h> + +struct nvc0_vp_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * VP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_vp_sclass[] = { + { 0x90b2, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PVP context + ******************************************************************************/ + +static struct nouveau_oclass +nvc0_vp_cclass = { + .handle = NV_ENGCTX(VP, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + }, +}; + +/******************************************************************************* + * PVP engine/subdev functions + ******************************************************************************/ + +static int +nvc0_vp_init(struct nouveau_object *object) +{ + struct nvc0_vp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x085010, 0x0000fff2); + nv_wr32(priv, 0x08501c, 0x0000fff2); + return 0; +} + +static int +nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_vp_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, + "PVP", "vp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00020000; + nv_engine(priv)->cclass = &nvc0_vp_cclass; + nv_engine(priv)->sclass = nvc0_vp_sclass; + return 0; +} + +struct nouveau_oclass +nvc0_vp_oclass = { + .handle = NV_ENGINE(VP, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_vp_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nvc0_vp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c new file mode 100644 index 0000000..2384ce5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c @@ -0,0 +1,110 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/falcon.h> + +#include <engine/vp.h> + +struct nve0_vp_priv { + struct nouveau_falcon base; +}; + +/******************************************************************************* + * VP object classes + ******************************************************************************/ + +static struct nouveau_oclass +nve0_vp_sclass[] = { + { 0x95b2, &nouveau_object_ofuncs }, + {}, +}; + +/******************************************************************************* + * PVP context + ******************************************************************************/ + +static struct nouveau_oclass +nve0_vp_cclass = { + .handle = NV_ENGCTX(VP, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_falcon_context_ctor, + .dtor = _nouveau_falcon_context_dtor, + .init = _nouveau_falcon_context_init, + .fini = _nouveau_falcon_context_fini, + .rd32 = _nouveau_falcon_context_rd32, + .wr32 = _nouveau_falcon_context_wr32, + }, +}; + +/******************************************************************************* + * PVP engine/subdev functions + ******************************************************************************/ + +static int +nve0_vp_init(struct nouveau_object *object) +{ + struct nve0_vp_priv *priv = (void *)object; + int ret; + + ret = nouveau_falcon_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x085010, 0x0000fff2); + nv_wr32(priv, 0x08501c, 0x0000fff2); + return 0; +} + +static int +nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_vp_priv *priv; + int ret; + + ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true, + "PVP", "vp", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->unit = 0x00020000; + nv_engine(priv)->cclass = &nve0_vp_cclass; + nv_engine(priv)->sclass = nve0_vp_sclass; + return 0; +} + +struct nouveau_oclass +nve0_vp_oclass = { + .handle = NV_ENGINE(VP, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_vp_ctor, + .dtor = _nouveau_falcon_dtor, + .init = nve0_vp_init, + .fini = _nouveau_falcon_fini, + .rd32 = _nouveau_falcon_rd32, + .wr32 = _nouveau_falcon_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h new file mode 100644 index 0000000..5a5961b --- 
/dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/class.h @@ -0,0 +1,361 @@ +#ifndef __NOUVEAU_CLASS_H__ +#define __NOUVEAU_CLASS_H__ + +/* Device class + * + * 0080: NV_DEVICE + */ +#define NV_DEVICE_CLASS 0x00000080 + +#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL +#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL +#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL +#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL +#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL +#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL +#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL +#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL +#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL +#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL +#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL +#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL +#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL +#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL +#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL +#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL +#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL + +struct nv_device_class { + u64 device; /* device identifier, ~0 for client default */ + u64 disable; /* disable particular subsystems */ + u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */ +}; + +/* DMA object classes + * + * 0002: NV_DMA_FROM_MEMORY + * 0003: NV_DMA_TO_MEMORY + * 003d: NV_DMA_IN_MEMORY + */ +#define NV_DMA_FROM_MEMORY_CLASS 0x00000002 +#define NV_DMA_TO_MEMORY_CLASS 0x00000003 +#define NV_DMA_IN_MEMORY_CLASS 0x0000003d + +#define NV_DMA_TARGET_MASK 0x000000ff +#define NV_DMA_TARGET_VM 0x00000000 +#define NV_DMA_TARGET_VRAM 0x00000001 +#define NV_DMA_TARGET_PCI 0x00000002 +#define NV_DMA_TARGET_PCI_US 0x00000003 +#define NV_DMA_TARGET_AGP 0x00000004 +#define NV_DMA_ACCESS_MASK 0x00000f00 +#define NV_DMA_ACCESS_VM 0x00000000 +#define NV_DMA_ACCESS_RD 0x00000100 +#define NV_DMA_ACCESS_WR 0x00000200 +#define NV_DMA_ACCESS_RDWR 0x00000300 + +/* NV50:NVC0 */ +#define NV50_DMA_CONF0_ENABLE 0x80000000 +#define NV50_DMA_CONF0_PRIV 0x00300000 +#define NV50_DMA_CONF0_PRIV_VM 0x00000000 +#define NV50_DMA_CONF0_PRIV_US 0x00100000 +#define NV50_DMA_CONF0_PRIV__S 0x00200000 +#define NV50_DMA_CONF0_PART 0x00030000 +#define NV50_DMA_CONF0_PART_VM 0x00000000 +#define NV50_DMA_CONF0_PART_256 0x00010000 +#define NV50_DMA_CONF0_PART_1KB 0x00020000 +#define NV50_DMA_CONF0_COMP 0x00000180 +#define NV50_DMA_CONF0_COMP_NONE 0x00000000 +#define NV50_DMA_CONF0_COMP_VM 0x00000180 +#define NV50_DMA_CONF0_TYPE 0x0000007f +#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000 +#define NV50_DMA_CONF0_TYPE_VM 0x0000007f + +/* NVC0:NVD9 */ +#define NVC0_DMA_CONF0_ENABLE 0x80000000 +#define NVC0_DMA_CONF0_PRIV 0x00300000 +#define NVC0_DMA_CONF0_PRIV_VM 0x00000000 +#define NVC0_DMA_CONF0_PRIV_US 0x00100000 +#define NVC0_DMA_CONF0_PRIV__S 0x00200000 +#define NVC0_DMA_CONF0_UNKN /* PART? 
*/ 0x00030000 +#define NVC0_DMA_CONF0_TYPE 0x000000ff +#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000 +#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff + +/* NVD9- */ +#define NVD0_DMA_CONF0_ENABLE 0x80000000 +#define NVD0_DMA_CONF0_PAGE 0x00000400 +#define NVD0_DMA_CONF0_PAGE_LP 0x00000000 +#define NVD0_DMA_CONF0_PAGE_SP 0x00000400 +#define NVD0_DMA_CONF0_TYPE 0x000000ff +#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000 +#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff + +struct nv_dma_class { + u32 flags; + u32 pad0; + u64 start; + u64 limit; + u32 conf0; +}; + +/* DMA FIFO channel classes + * + * 006b: NV03_CHANNEL_DMA + * 006e: NV10_CHANNEL_DMA + * 176e: NV17_CHANNEL_DMA + * 406e: NV40_CHANNEL_DMA + * 506e: NV50_CHANNEL_DMA + * 826e: NV84_CHANNEL_DMA + */ +#define NV03_CHANNEL_DMA_CLASS 0x0000006b +#define NV10_CHANNEL_DMA_CLASS 0x0000006e +#define NV17_CHANNEL_DMA_CLASS 0x0000176e +#define NV40_CHANNEL_DMA_CLASS 0x0000406e +#define NV50_CHANNEL_DMA_CLASS 0x0000506e +#define NV84_CHANNEL_DMA_CLASS 0x0000826e + +struct nv03_channel_dma_class { + u32 pushbuf; + u32 pad0; + u64 offset; +}; + +/* Indirect FIFO channel classes + * + * 506f: NV50_CHANNEL_IND + * 826f: NV84_CHANNEL_IND + * 906f: NVC0_CHANNEL_IND + * a06f: NVE0_CHANNEL_IND + */ + +#define NV50_CHANNEL_IND_CLASS 0x0000506f +#define NV84_CHANNEL_IND_CLASS 0x0000826f +#define NVC0_CHANNEL_IND_CLASS 0x0000906f +#define NVE0_CHANNEL_IND_CLASS 0x0000a06f + +struct nv50_channel_ind_class { + u32 pushbuf; + u32 ilength; + u64 ioffset; +}; + +#define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001 +#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002 +#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004 +#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008 +#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010 +#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020 +#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040 + +struct nve0_channel_ind_class { + u32 pushbuf; + u32 ilength; + u64 ioffset; + u32 engine; +}; + +/* 0046: NV04_DISP + */ + +#define NV04_DISP_CLASS 0x00000046 + +struct nv04_display_class { +}; + +/* 5070: NV50_DISP + * 8270: NV84_DISP + * 8370: NVA0_DISP + * 8870: NV94_DISP + * 8570: NVA3_DISP + * 9070: NVD0_DISP + * 9170: NVE0_DISP + * 9270: NVF0_DISP + */ + +#define NV50_DISP_CLASS 0x00005070 +#define NV84_DISP_CLASS 0x00008270 +#define NVA0_DISP_CLASS 0x00008370 +#define NV94_DISP_CLASS 0x00008870 +#define NVA3_DISP_CLASS 0x00008570 +#define NVD0_DISP_CLASS 0x00009070 +#define NVE0_DISP_CLASS 0x00009170 +#define NVF0_DISP_CLASS 0x00009270 + +#define NV50_DISP_SOR_MTHD 0x00010000 +#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 +#define NV50_DISP_SOR_MTHD_HEAD 0x00000018 +#define NV50_DISP_SOR_MTHD_LINK 0x00000004 +#define NV50_DISP_SOR_MTHD_OR 0x00000003 + +#define NV50_DISP_SOR_PWR 0x00010000 +#define NV50_DISP_SOR_PWR_STATE 0x00000001 +#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001 +#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000 +#define NVA3_DISP_SOR_HDA_ELD 0x00010100 +#define NV84_DISP_SOR_HDMI_PWR 0x00012000 +#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000 +#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000 +#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000 +#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000 +#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f +#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000 +#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff + +#define NV50_DISP_DAC_MTHD 0x00020000 +#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000 +#define NV50_DISP_DAC_MTHD_OR 0x00000003 + +#define NV50_DISP_DAC_PWR 0x00020000 +#define NV50_DISP_DAC_PWR_HSYNC 0x00000001 +#define 
NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000 +#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001 +#define NV50_DISP_DAC_PWR_VSYNC 0x00000004 +#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000 +#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004 +#define NV50_DISP_DAC_PWR_DATA 0x00000010 +#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000 +#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010 +#define NV50_DISP_DAC_PWR_STATE 0x00000040 +#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 +#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 +#define NV50_DISP_DAC_LOAD 0x00020100 +#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 + +#define NV50_DISP_PIOR_MTHD 0x00030000 +#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000 +#define NV50_DISP_PIOR_MTHD_OR 0x00000003 + +#define NV50_DISP_PIOR_PWR 0x00030000 +#define NV50_DISP_PIOR_PWR_STATE 0x00000001 +#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001 +#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000 +#define NV50_DISP_PIOR_TMDS_PWR 0x00032000 +#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001 +#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001 +#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000 +#define NV50_DISP_PIOR_DP_PWR 0x00036000 +#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001 +#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001 +#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000 + +struct nv50_display_class { +}; + +/* 507a: NV50_DISP_CURS + * 827a: NV84_DISP_CURS + * 837a: NVA0_DISP_CURS + * 887a: NV94_DISP_CURS + * 857a: NVA3_DISP_CURS + * 907a: NVD0_DISP_CURS + * 917a: NVE0_DISP_CURS + * 927a: NVF0_DISP_CURS + */ + +#define NV50_DISP_CURS_CLASS 0x0000507a +#define NV84_DISP_CURS_CLASS 0x0000827a +#define NVA0_DISP_CURS_CLASS 0x0000837a +#define NV94_DISP_CURS_CLASS 0x0000887a +#define NVA3_DISP_CURS_CLASS 0x0000857a +#define NVD0_DISP_CURS_CLASS 0x0000907a +#define NVE0_DISP_CURS_CLASS 0x0000917a +#define NVF0_DISP_CURS_CLASS 0x0000927a + +struct nv50_display_curs_class { + u32 head; +}; + +/* 507b: NV50_DISP_OIMM + * 827b: NV84_DISP_OIMM + * 837b: NVA0_DISP_OIMM + * 887b: NV94_DISP_OIMM + * 857b: NVA3_DISP_OIMM + * 907b: NVD0_DISP_OIMM + * 917b: NVE0_DISP_OIMM + * 927b: NVF0_DISP_OIMM + */ + +#define NV50_DISP_OIMM_CLASS 0x0000507b +#define NV84_DISP_OIMM_CLASS 0x0000827b +#define NVA0_DISP_OIMM_CLASS 0x0000837b +#define NV94_DISP_OIMM_CLASS 0x0000887b +#define NVA3_DISP_OIMM_CLASS 0x0000857b +#define NVD0_DISP_OIMM_CLASS 0x0000907b +#define NVE0_DISP_OIMM_CLASS 0x0000917b +#define NVF0_DISP_OIMM_CLASS 0x0000927b + +struct nv50_display_oimm_class { + u32 head; +}; + +/* 507c: NV50_DISP_SYNC + * 827c: NV84_DISP_SYNC + * 837c: NVA0_DISP_SYNC + * 887c: NV94_DISP_SYNC + * 857c: NVA3_DISP_SYNC + * 907c: NVD0_DISP_SYNC + * 917c: NVE0_DISP_SYNC + * 927c: NVF0_DISP_SYNC + */ + +#define NV50_DISP_SYNC_CLASS 0x0000507c +#define NV84_DISP_SYNC_CLASS 0x0000827c +#define NVA0_DISP_SYNC_CLASS 0x0000837c +#define NV94_DISP_SYNC_CLASS 0x0000887c +#define NVA3_DISP_SYNC_CLASS 0x0000857c +#define NVD0_DISP_SYNC_CLASS 0x0000907c +#define NVE0_DISP_SYNC_CLASS 0x0000917c +#define NVF0_DISP_SYNC_CLASS 0x0000927c + +struct nv50_display_sync_class { + u32 pushbuf; + u32 head; +}; + +/* 507d: NV50_DISP_MAST + * 827d: NV84_DISP_MAST + * 837d: NVA0_DISP_MAST + * 887d: NV94_DISP_MAST + * 857d: NVA3_DISP_MAST + * 907d: NVD0_DISP_MAST + * 917d: NVE0_DISP_MAST + * 927d: NVF0_DISP_MAST + */ + +#define NV50_DISP_MAST_CLASS 0x0000507d +#define NV84_DISP_MAST_CLASS 0x0000827d +#define NVA0_DISP_MAST_CLASS 0x0000837d +#define NV94_DISP_MAST_CLASS 0x0000887d +#define NVA3_DISP_MAST_CLASS 0x0000857d +#define NVD0_DISP_MAST_CLASS 
0x0000907d +#define NVE0_DISP_MAST_CLASS 0x0000917d +#define NVF0_DISP_MAST_CLASS 0x0000927d + +struct nv50_display_mast_class { + u32 pushbuf; +}; + +/* 507e: NV50_DISP_OVLY + * 827e: NV84_DISP_OVLY + * 837e: NVA0_DISP_OVLY + * 887e: NV94_DISP_OVLY + * 857e: NVA3_DISP_OVLY + * 907e: NVD0_DISP_OVLY + * 917e: NVE0_DISP_OVLY + * 927e: NVF0_DISP_OVLY + */ + +#define NV50_DISP_OVLY_CLASS 0x0000507e +#define NV84_DISP_OVLY_CLASS 0x0000827e +#define NVA0_DISP_OVLY_CLASS 0x0000837e +#define NV94_DISP_OVLY_CLASS 0x0000887e +#define NVA3_DISP_OVLY_CLASS 0x0000857e +#define NVD0_DISP_OVLY_CLASS 0x0000907e +#define NVE0_DISP_OVLY_CLASS 0x0000917e +#define NVF0_DISP_OVLY_CLASS 0x0000927e + +struct nv50_display_ovly_class { + u32 pushbuf; + u32 head; +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h new file mode 100644 index 0000000..c284c8c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/client.h @@ -0,0 +1,46 @@ +#ifndef __NOUVEAU_CLIENT_H__ +#define __NOUVEAU_CLIENT_H__ + +#include <core/namedb.h> + +struct nouveau_client { + struct nouveau_namedb base; + struct nouveau_handle *root; + struct nouveau_object *device; + char name[32]; + u32 debug; + struct nouveau_vm *vm; +}; + +static inline struct nouveau_client * +nv_client(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS))) + nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj)); +#endif + return obj; +} + +static inline struct nouveau_client * +nouveau_client(void *obj) +{ + struct nouveau_object *client = nv_object(obj); + while (client && !(nv_iclass(client, NV_CLIENT_CLASS))) + client = client->parent; + return (void *)client; +} + +#define nouveau_client_create(n,c,oc,od,d) \ + nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d) + +int nouveau_client_create_(const char *name, u64 device, const char *cfg, + const char *dbg, int, void **); +#define nouveau_client_destroy(p) \ + nouveau_namedb_destroy(&(p)->base) + +int nouveau_client_init(struct nouveau_client *); +int nouveau_client_fini(struct nouveau_client *, bool suspend); +const char *nouveau_client_name(void *obj); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h new file mode 100644 index 0000000..9ea18df --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h @@ -0,0 +1,13 @@ +#ifndef __NOUVEAU_DEBUG_H__ +#define __NOUVEAU_DEBUG_H__ + +#define NV_DBG_FATAL 0 +#define NV_DBG_ERROR 1 +#define NV_DBG_WARN 2 +#define NV_DBG_INFO 3 +#define NV_DBG_DEBUG 4 +#define NV_DBG_TRACE 5 +#define NV_DBG_PARANOIA 6 +#define NV_DBG_SPAM 7 + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h new file mode 100644 index 0000000..29013ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/device.h @@ -0,0 +1,137 @@ +#ifndef __NOUVEAU_DEVICE_H__ +#define __NOUVEAU_DEVICE_H__ + +#include <core/object.h> +#include <core/subdev.h> +#include <core/engine.h> + +enum nv_subdev_type { + NVDEV_ENGINE_DEVICE, + NVDEV_SUBDEV_VBIOS, + + /* All subdevs from DEVINIT to DEVINIT_LAST will be created before + * *any* of them are initialised. This subdev category is used + * for any subdevs that the VBIOS init table parsing may call out + * to during POST. 
+ */ + NVDEV_SUBDEV_DEVINIT, + NVDEV_SUBDEV_GPIO, + NVDEV_SUBDEV_I2C, + NVDEV_SUBDEV_CLOCK, + NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_CLOCK, + + /* This grouping of subdevs are initialised right after they've + * been created, and are allowed to assume any subdevs in the + * list above them exist and have been initialised. + */ + NVDEV_SUBDEV_MXM, + NVDEV_SUBDEV_MC, + NVDEV_SUBDEV_BUS, + NVDEV_SUBDEV_TIMER, + NVDEV_SUBDEV_FB, + NVDEV_SUBDEV_LTCG, + NVDEV_SUBDEV_IBUS, + NVDEV_SUBDEV_INSTMEM, + NVDEV_SUBDEV_VM, + NVDEV_SUBDEV_BAR, + NVDEV_SUBDEV_VOLT, + NVDEV_SUBDEV_THERM, + + NVDEV_ENGINE_DMAOBJ, + NVDEV_ENGINE_FIFO, + NVDEV_ENGINE_SW, + NVDEV_ENGINE_GR, + NVDEV_ENGINE_MPEG, + NVDEV_ENGINE_ME, + NVDEV_ENGINE_VP, + NVDEV_ENGINE_CRYPT, + NVDEV_ENGINE_BSP, + NVDEV_ENGINE_PPP, + NVDEV_ENGINE_COPY0, + NVDEV_ENGINE_COPY1, + NVDEV_ENGINE_UNK1C1, + NVDEV_ENGINE_VENC, + NVDEV_ENGINE_DISP, + + NVDEV_SUBDEV_NR, +}; + +struct nouveau_device { + struct nouveau_engine base; + struct list_head head; + + struct pci_dev *pdev; + u64 handle; + + const char *cfgopt; + const char *dbgopt; + const char *name; + const char *cname; + + enum { + NV_04 = 0x04, + NV_10 = 0x10, + NV_20 = 0x20, + NV_30 = 0x30, + NV_40 = 0x40, + NV_50 = 0x50, + NV_C0 = 0xc0, + NV_D0 = 0xd0, + NV_E0 = 0xe0, + } card_type; + u32 chipset; + u32 crystal; + + struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR]; + struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; +}; + +static inline struct nouveau_device * +nv_device(void *obj) +{ + struct nouveau_object *object = nv_object(obj); + struct nouveau_object *device = object; + + if (device->engine) + device = device->engine; + if (device->parent) + device = device->parent; + +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) || + (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) { + nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x", + nv_hclass(object), nv_hclass(device)); + } +#endif + + return (void *)device; +} + +static inline struct nouveau_subdev * +nouveau_subdev(void *obj, int sub) +{ + if (nv_device(obj)->subdev[sub]) + return nv_subdev(nv_device(obj)->subdev[sub]); + return NULL; +} + +static inline struct nouveau_engine * +nouveau_engine(void *obj, int sub) +{ + struct nouveau_subdev *subdev = nouveau_subdev(obj, sub); + if (subdev && nv_iclass(subdev, NV_ENGINE_CLASS)) + return nv_engine(subdev); + return NULL; +} + +static inline bool +nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub) +{ + struct nouveau_device *device = nv_device(object); + return device->pdev->device == dev && + device->pdev->subsystem_vendor == ven && + device->pdev->subsystem_device == sub; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h new file mode 100644 index 0000000..97db3e6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h @@ -0,0 +1,54 @@ +#ifndef __NOUVEAU_ENGCTX_H__ +#define __NOUVEAU_ENGCTX_H__ + +#include <core/object.h> +#include <core/gpuobj.h> + +#include <subdev/vm.h> + +#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng)) +#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var)) + +struct nouveau_engctx { + struct nouveau_gpuobj base; + struct nouveau_vma vma; + struct list_head head; + unsigned long save; + u64 addr; +}; + +static inline struct nouveau_engctx * +nv_engctx(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS))) + nv_assert("BAD CAST -> 
NvEngCtx, %08x", nv_hclass(obj)); +#endif + return obj; +} + +#define nouveau_engctx_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f), \ + sizeof(**d), (void **)d) + +int nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, struct nouveau_object *, + u32 size, u32 align, u32 flags, + int length, void **data); +void nouveau_engctx_destroy(struct nouveau_engctx *); +int nouveau_engctx_init(struct nouveau_engctx *); +int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend); + +int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +void _nouveau_engctx_dtor(struct nouveau_object *); +int _nouveau_engctx_init(struct nouveau_object *); +int _nouveau_engctx_fini(struct nouveau_object *, bool suspend); +#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32 +#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32 + +struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr); +void nouveau_engctx_put(struct nouveau_object *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/engine.h b/drivers/gpu/drm/nouveau/core/include/core/engine.h new file mode 100644 index 0000000..4ceed7d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/engine.h @@ -0,0 +1,57 @@ +#ifndef __NOUVEAU_ENGINE_H__ +#define __NOUVEAU_ENGINE_H__ + +#include <core/object.h> +#include <core/subdev.h> + +#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng)) +#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var)) + +struct nouveau_engine { + struct nouveau_subdev base; + struct nouveau_oclass *cclass; + struct nouveau_oclass *sclass; + + struct list_head contexts; + spinlock_t lock; + + void (*tile_prog)(struct nouveau_engine *, int region); + int (*tlb_flush)(struct nouveau_engine *); +}; + +static inline struct nouveau_engine * +nv_engine(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS))) + nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj)); +#endif + return obj; +} + +static inline int +nv_engidx(struct nouveau_object *object) +{ + return nv_subidx(object); +} + +#define nouveau_engine_create(p,e,c,d,i,f,r) \ + nouveau_engine_create_((p), (e), (c), (d), (i), (f), \ + sizeof(**r),(void **)r) + +#define nouveau_engine_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_engine_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_engine_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, bool, const char *, + const char *, int, void **); + +#define _nouveau_engine_dtor _nouveau_subdev_dtor +#define _nouveau_engine_init _nouveau_subdev_init +#define _nouveau_engine_fini _nouveau_subdev_fini + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h new file mode 100644 index 0000000..4fc62bb --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h @@ -0,0 +1,24 @@ +#ifndef __NOUVEAU_ENUM_H__ +#define __NOUVEAU_ENUM_H__ + +struct nouveau_enum { + u32 value; + const char *name; + const void *data; + u32 data2; +}; + +const struct nouveau_enum * +nouveau_enum_find(const struct nouveau_enum *, u32 value); + +const struct nouveau_enum * +nouveau_enum_print(const struct nouveau_enum *en, u32 value); + +struct nouveau_bitfield { + u32 mask; + 
const char *name; +}; + +void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h new file mode 100644 index 0000000..9e09440 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/event.h @@ -0,0 +1,36 @@ +#ifndef __NVKM_EVENT_H__ +#define __NVKM_EVENT_H__ + +/* return codes from event handlers */ +#define NVKM_EVENT_DROP 0 +#define NVKM_EVENT_KEEP 1 + +struct nouveau_eventh { + struct list_head head; + int (*func)(struct nouveau_eventh *, int index); +}; + +struct nouveau_event { + spinlock_t lock; + + void *priv; + void (*enable)(struct nouveau_event *, int index); + void (*disable)(struct nouveau_event *, int index); + + int index_nr; + struct { + struct list_head list; + int refs; + } index[]; +}; + +int nouveau_event_create(int index_nr, struct nouveau_event **); +void nouveau_event_destroy(struct nouveau_event **); +void nouveau_event_trigger(struct nouveau_event *, int index); + +void nouveau_event_get(struct nouveau_event *, int index, + struct nouveau_eventh *); +void nouveau_event_put(struct nouveau_event *, int index, + struct nouveau_eventh *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h new file mode 100644 index 0000000..1edec38 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h @@ -0,0 +1,81 @@ +#ifndef __NOUVEAU_FALCON_H__ +#define __NOUVEAU_FALCON_H__ + +#include <core/engine.h> +#include <core/engctx.h> +#include <core/gpuobj.h> + +struct nouveau_falcon_chan { + struct nouveau_engctx base; +}; + +#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_falcon_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_falcon_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_falcon_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor +#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor +#define _nouveau_falcon_context_init _nouveau_engctx_init +#define _nouveau_falcon_context_fini _nouveau_engctx_fini +#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_falcon_data { + bool external; +}; + +struct nouveau_falcon { + struct nouveau_engine base; + + u32 addr; + u8 version; + u8 secret; + + struct nouveau_gpuobj *core; + bool external; + + struct { + u32 limit; + u32 *data; + u32 size; + } code; + + struct { + u32 limit; + u32 *data; + u32 size; + } data; +}; + +#define nv_falcon(priv) (&(priv)->base) + +#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \ + nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \ + sizeof(**r),(void **)r) +#define nouveau_falcon_destroy(p) \ + nouveau_engine_destroy(&(p)->base) +#define nouveau_falcon_init(p) ({ \ + struct nouveau_falcon *falcon = (p); \ + _nouveau_falcon_init(nv_object(falcon)); \ +}) +#define nouveau_falcon_fini(p,s) ({ \ + struct nouveau_falcon *falcon = (p); \ + _nouveau_falcon_fini(nv_object(falcon), (s)); \ +}) + +int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32, bool, const char *, + const char *, int, void **); + +#define _nouveau_falcon_dtor _nouveau_engine_dtor +int _nouveau_falcon_init(struct nouveau_object *); +int _nouveau_falcon_fini(struct 
nouveau_object *, bool); +u32 _nouveau_falcon_rd32(struct nouveau_object *, u64); +void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h new file mode 100644 index 0000000..69ee8c0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h @@ -0,0 +1,71 @@ +#ifndef __NOUVEAU_GPUOBJ_H__ +#define __NOUVEAU_GPUOBJ_H__ + +#include <core/object.h> +#include <core/device.h> +#include <core/parent.h> +#include <core/mm.h> + +struct nouveau_vma; +struct nouveau_vm; + +#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001 +#define NVOBJ_FLAG_ZERO_FREE 0x00000002 +#define NVOBJ_FLAG_HEAP 0x00000004 + +struct nouveau_gpuobj { + struct nouveau_object base; + struct nouveau_object *parent; + struct nouveau_mm_node *node; + struct nouveau_mm heap; + + u32 flags; + u64 addr; + u32 size; +}; + +static inline struct nouveau_gpuobj * +nv_gpuobj(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS))) + nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj)); +#endif + return obj; +} + +#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d) \ + nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \ + sizeof(**d), (void **)d) +#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->base) +#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->base, (s)) +int nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32 pclass, + struct nouveau_object *, u32 size, u32 align, + u32 flags, int length, void **); +void nouveau_gpuobj_destroy(struct nouveau_gpuobj *); + +int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *, + u32 size, u32 align, u32 flags, + struct nouveau_gpuobj **); +int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *, + struct nouveau_gpuobj **); + +int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *); +int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *, + u32 access, struct nouveau_vma *); +void nouveau_gpuobj_unmap(struct nouveau_vma *); + +static inline void +nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref) +{ + nouveau_object_ref(&obj->base, (struct nouveau_object **)ref); +} + +void _nouveau_gpuobj_dtor(struct nouveau_object *); +int _nouveau_gpuobj_init(struct nouveau_object *); +int _nouveau_gpuobj_fini(struct nouveau_object *, bool); +u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64); +void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h new file mode 100644 index 0000000..363674c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h @@ -0,0 +1,31 @@ +#ifndef __NOUVEAU_HANDLE_H__ +#define __NOUVEAU_HANDLE_H__ + +struct nouveau_handle { + struct nouveau_namedb *namedb; + struct list_head node; + + struct list_head head; + struct list_head tree; + u32 name; + u32 priv; + + struct nouveau_handle *parent; + struct nouveau_object *object; +}; + +int nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle, + struct nouveau_object *, struct nouveau_handle **); +void nouveau_handle_destroy(struct nouveau_handle *); +int nouveau_handle_init(struct nouveau_handle *); +int nouveau_handle_fini(struct nouveau_handle *, bool suspend); + +struct nouveau_object * +nouveau_handle_ref(struct 
nouveau_object *, u32 name); + +struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16); +struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64); +struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32); +void nouveau_handle_put(struct nouveau_handle *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h new file mode 100644 index 0000000..f808131 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/math.h @@ -0,0 +1,16 @@ +#ifndef __NOUVEAU_MATH_H__ +#define __NOUVEAU_MATH_H__ + +static inline int +log2i(u64 base) +{ + u64 temp = base >> 1; + int log2; + + for (log2 = 0; temp; log2++, temp >>= 1) { + } + + return (base & (base - 1)) ? log2 + 1: log2; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h new file mode 100644 index 0000000..2514e81 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h @@ -0,0 +1,38 @@ +#ifndef __NOUVEAU_MM_H__ +#define __NOUVEAU_MM_H__ + +struct nouveau_mm_node { + struct list_head nl_entry; + struct list_head fl_entry; + struct list_head rl_entry; + + u8 type; + u32 offset; + u32 length; +}; + +struct nouveau_mm { + struct list_head nodes; + struct list_head free; + + struct mutex mutex; + + u32 block_size; + int heap_nodes; +}; + +static inline bool +nouveau_mm_initialised(struct nouveau_mm *mm) +{ + return mm->block_size != 0; +} + +int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); +int nouveau_mm_fini(struct nouveau_mm *); +int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min, + u32 align, struct nouveau_mm_node **); +int nouveau_mm_tail(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min, + u32 align, struct nouveau_mm_node **); +void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h new file mode 100644 index 0000000..dbaa08b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/namedb.h @@ -0,0 +1,56 @@ +#ifndef __NOUVEAU_NAMEDB_H__ +#define __NOUVEAU_NAMEDB_H__ + +#include <core/parent.h> + +struct nouveau_handle; + +struct nouveau_namedb { + struct nouveau_parent base; + rwlock_t lock; + struct list_head list; +}; + +static inline struct nouveau_namedb * +nv_namedb(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS))) + nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj)); +#endif + return obj; +} + +#define nouveau_namedb_create(p,e,c,v,s,m,d) \ + nouveau_namedb_create_((p), (e), (c), (v), (s), (m), \ + sizeof(**d), (void **)d) +#define nouveau_namedb_init(p) \ + nouveau_parent_init(&(p)->base) +#define nouveau_namedb_fini(p,s) \ + nouveau_parent_fini(&(p)->base, (s)) +#define nouveau_namedb_destroy(p) \ + nouveau_parent_destroy(&(p)->base) + +int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32 pclass, + struct nouveau_oclass *, u32 engcls, + int size, void **); + +int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +#define _nouveau_namedb_dtor _nouveau_parent_dtor +#define _nouveau_namedb_init _nouveau_parent_init +#define _nouveau_namedb_fini _nouveau_parent_fini + +int nouveau_namedb_insert(struct nouveau_namedb *, 
u32 name, + struct nouveau_object *, struct nouveau_handle *); +void nouveau_namedb_remove(struct nouveau_handle *); + +struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32); +struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16); +struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64); +struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32); +void nouveau_namedb_put(struct nouveau_handle *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h new file mode 100644 index 0000000..ce6d85c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -0,0 +1,202 @@ +#ifndef __NOUVEAU_OBJECT_H__ +#define __NOUVEAU_OBJECT_H__ + +#include <core/os.h> +#include <core/printk.h> + +#define NV_PARENT_CLASS 0x80000000 +#define NV_NAMEDB_CLASS 0x40000000 +#define NV_CLIENT_CLASS 0x20000000 +#define NV_SUBDEV_CLASS 0x10000000 +#define NV_ENGINE_CLASS 0x08000000 +#define NV_MEMOBJ_CLASS 0x04000000 +#define NV_GPUOBJ_CLASS 0x02000000 +#define NV_ENGCTX_CLASS 0x01000000 +#define NV_OBJECT_CLASS 0x0000ffff + +struct nouveau_object { + struct nouveau_oclass *oclass; + struct nouveau_object *parent; + struct nouveau_object *engine; + atomic_t refcount; + atomic_t usecount; +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA +#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad + struct list_head list; + u32 _magic; +#endif +}; + +static inline struct nouveau_object * +nv_object(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (likely(obj)) { + struct nouveau_object *object = obj; + if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC)) + nv_assert("BAD CAST -> NvObject, invalid magic"); + } +#endif + return obj; +} + +#define nouveau_object_create(p,e,c,s,d) \ + nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d) +int nouveau_object_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32, int size, void **); +void nouveau_object_destroy(struct nouveau_object *); +int nouveau_object_init(struct nouveau_object *); +int nouveau_object_fini(struct nouveau_object *, bool suspend); + +extern struct nouveau_ofuncs nouveau_object_ofuncs; + +/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in + * ".data". 
*/ +struct nouveau_oclass { + u32 handle; + struct nouveau_ofuncs * const ofuncs; + struct nouveau_omthds * const omthds; + struct lock_class_key lock_class_key; +}; + +#define nv_oclass(o) nv_object(o)->oclass +#define nv_hclass(o) nv_oclass(o)->handle +#define nv_iclass(o,i) (nv_hclass(o) & (i)) +#define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS) + +static inline struct nouveau_object * +nv_pclass(struct nouveau_object *parent, u32 oclass) +{ + while (parent && !nv_iclass(parent, oclass)) + parent = parent->parent; + return parent; +} + +struct nouveau_omthds { + u32 start; + u32 limit; + int (*call)(struct nouveau_object *, u32, void *, u32); +}; + +struct nouveau_ofuncs { + int (*ctor)(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *data, u32 size, + struct nouveau_object **); + void (*dtor)(struct nouveau_object *); + int (*init)(struct nouveau_object *); + int (*fini)(struct nouveau_object *, bool suspend); + u8 (*rd08)(struct nouveau_object *, u64 offset); + u16 (*rd16)(struct nouveau_object *, u64 offset); + u32 (*rd32)(struct nouveau_object *, u64 offset); + void (*wr08)(struct nouveau_object *, u64 offset, u8 data); + void (*wr16)(struct nouveau_object *, u64 offset, u16 data); + void (*wr32)(struct nouveau_object *, u64 offset, u32 data); +}; + +static inline struct nouveau_ofuncs * +nv_ofuncs(void *obj) +{ + return nv_oclass(obj)->ofuncs; +} + +int nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **); +int nouveau_object_inc(struct nouveau_object *); +int nouveau_object_dec(struct nouveau_object *, bool suspend); + +int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle, + u16 oclass, void *data, u32 size, + struct nouveau_object **); +int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle); +void nouveau_object_debug(void); + +static inline int +nv_exec(void *obj, u32 mthd, void *data, u32 size) +{ + struct nouveau_omthds *method = nv_oclass(obj)->omthds; + + while (method && method->call) { + if (mthd >= method->start && mthd <= method->limit) + return method->call(obj, mthd, data, size); + method++; + } + + return -EINVAL; +} + +static inline int +nv_call(void *obj, u32 mthd, u32 data) +{ + return nv_exec(obj, mthd, &data, sizeof(data)); +} + +static inline u8 +nv_ro08(void *obj, u64 addr) +{ + u8 data = nv_ofuncs(obj)->rd08(obj, addr); + nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data); + return data; +} + +static inline u16 +nv_ro16(void *obj, u64 addr) +{ + u16 data = nv_ofuncs(obj)->rd16(obj, addr); + nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data); + return data; +} + +static inline u32 +nv_ro32(void *obj, u64 addr) +{ + u32 data = nv_ofuncs(obj)->rd32(obj, addr); + nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data); + return data; +} + +static inline void +nv_wo08(void *obj, u64 addr, u8 data) +{ + nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data); + nv_ofuncs(obj)->wr08(obj, addr, data); +} + +static inline void +nv_wo16(void *obj, u64 addr, u16 data) +{ + nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data); + nv_ofuncs(obj)->wr16(obj, addr, data); +} + +static inline void +nv_wo32(void *obj, u64 addr, u32 data) +{ + nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data); + nv_ofuncs(obj)->wr32(obj, addr, data); +} + +static inline u32 +nv_mo32(void *obj, u64 addr, u32 mask, u32 data) +{ + u32 temp = nv_ro32(obj, addr); + 
nv_wo32(obj, addr, (temp & ~mask) | data); + return temp; +} + +static inline int +nv_memcmp(void *obj, u32 addr, const char *str, u32 len) +{ + unsigned char c1, c2; + + while (len--) { + c1 = nv_ro08(obj, addr++); + c2 = *(str++); + if (c1 != c2) + return c1 - c2; + } + return 0; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h new file mode 100644 index 0000000..2707495 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/option.h @@ -0,0 +1,11 @@ +#ifndef __NOUVEAU_OPTION_H__ +#define __NOUVEAU_OPTION_H__ + +#include <core/os.h> + +const char *nouveau_stropt(const char *optstr, const char *opt, int *len); +bool nouveau_boolopt(const char *optstr, const char *opt, bool value); + +int nouveau_dbgopt(const char *optstr, const char *sub); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h new file mode 100644 index 0000000..1dd57f6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h @@ -0,0 +1,61 @@ +#ifndef __NOUVEAU_PARENT_H__ +#define __NOUVEAU_PARENT_H__ + +#include <core/device.h> +#include <core/object.h> + +struct nouveau_sclass { + struct nouveau_sclass *sclass; + struct nouveau_engine *engine; + struct nouveau_oclass *oclass; +}; + +struct nouveau_parent { + struct nouveau_object base; + + struct nouveau_sclass *sclass; + u64 engine; + + int (*context_attach)(struct nouveau_object *, + struct nouveau_object *); + int (*context_detach)(struct nouveau_object *, bool suspend, + struct nouveau_object *); + + int (*object_attach)(struct nouveau_object *parent, + struct nouveau_object *object, u32 name); + void (*object_detach)(struct nouveau_object *parent, int cookie); +}; + +static inline struct nouveau_parent * +nv_parent(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS)))) + nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj)); +#endif + return obj; +} + +#define nouveau_parent_create(p,e,c,v,s,m,d) \ + nouveau_parent_create_((p), (e), (c), (v), (s), (m), \ + sizeof(**d), (void **)d) +#define nouveau_parent_init(p) \ + nouveau_object_init(&(p)->base) +#define nouveau_parent_fini(p,s) \ + nouveau_object_fini(&(p)->base, (s)) + +int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32 pclass, + struct nouveau_oclass *, u64 engcls, + int size, void **); +void nouveau_parent_destroy(struct nouveau_parent *); + +void _nouveau_parent_dtor(struct nouveau_object *); +#define _nouveau_parent_init nouveau_object_init +#define _nouveau_parent_fini nouveau_object_fini + +int nouveau_parent_sclass(struct nouveau_object *, u16 handle, + struct nouveau_object **pengine, + struct nouveau_oclass **poclass); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h new file mode 100644 index 0000000..546bc69 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h @@ -0,0 +1,40 @@ +#ifndef __NOUVEAU_PRINTK_H__ +#define __NOUVEAU_PRINTK_H__ + +#include <core/os.h> +#include <core/debug.h> + +struct nouveau_object; + +#define NV_PRINTK_FATAL KERN_CRIT +#define NV_PRINTK_ERROR KERN_ERR +#define NV_PRINTK_WARN KERN_WARNING +#define NV_PRINTK_INFO KERN_INFO +#define NV_PRINTK_DEBUG KERN_DEBUG +#define NV_PRINTK_PARANOIA KERN_DEBUG +#define NV_PRINTK_TRACE KERN_DEBUG +#define NV_PRINTK_SPAM KERN_DEBUG + +void __printf(4, 5) 
+nv_printk_(struct nouveau_object *, const char *, int, const char *, ...); + +#define nv_printk(o,l,f,a...) do { \ + if (NV_DBG_##l <= CPTCFG_NOUVEAU_DEBUG) \ + nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a); \ +} while(0) + +#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a) +#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a) +#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a) +#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a) +#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) +#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) +#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) + +#define nv_assert(f,a...) do { \ + if (NV_DBG_FATAL <= CPTCFG_NOUVEAU_DEBUG) \ + nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \ + BUG_ON(1); \ +} while(0) + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h new file mode 100644 index 0000000..47e4cac --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/ramht.h @@ -0,0 +1,23 @@ +#ifndef __NOUVEAU_RAMHT_H__ +#define __NOUVEAU_RAMHT_H__ + +#include <core/gpuobj.h> + +struct nouveau_ramht { + struct nouveau_gpuobj base; + int bits; +}; + +int nouveau_ramht_insert(struct nouveau_ramht *, int chid, + u32 handle, u32 context); +void nouveau_ramht_remove(struct nouveau_ramht *, int cookie); +int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *, + u32 size, u32 align, struct nouveau_ramht **); + +static inline void +nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref) +{ + nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/subdev.h b/drivers/gpu/drm/nouveau/core/include/core/subdev.h new file mode 100644 index 0000000..cd2a622 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/subdev.h @@ -0,0 +1,118 @@ +#ifndef __NOUVEAU_SUBDEV_H__ +#define __NOUVEAU_SUBDEV_H__ + +#include <core/object.h> + +#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub)) +#define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var)) + +struct nouveau_subdev { + struct nouveau_object base; + struct mutex mutex; + const char *name; + void __iomem *mmio; + u32 debug; + u32 unit; + + void (*intr)(struct nouveau_subdev *); +}; + +static inline struct nouveau_subdev * +nv_subdev(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS))) + nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj)); +#endif + return obj; +} + +static inline int +nv_subidx(struct nouveau_object *object) +{ + return nv_hclass(nv_subdev(object)) & 0xff; +} + +#define nouveau_subdev_create(p,e,o,v,s,f,d) \ + nouveau_subdev_create_((p), (e), (o), (v), (s), (f), \ + sizeof(**d),(void **)d) + +int nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u32 pclass, + const char *sname, const char *fname, + int size, void **); +void nouveau_subdev_destroy(struct nouveau_subdev *); +int nouveau_subdev_init(struct nouveau_subdev *); +int nouveau_subdev_fini(struct nouveau_subdev *, bool suspend); +void nouveau_subdev_reset(struct nouveau_object *); + +void _nouveau_subdev_dtor(struct nouveau_object *); +int _nouveau_subdev_init(struct nouveau_object *); +int _nouveau_subdev_fini(struct nouveau_object *, bool suspend); + +#define s_printk(s,l,f,a...) 
do { \ + if ((s)->debug >= OS_DBG_##l) { \ + nv_printk((s)->base.parent, (s)->name, l, f, ##a); \ + } \ +} while(0) + +static inline u8 +nv_rd08(void *obj, u32 addr) +{ + struct nouveau_subdev *subdev = nv_subdev(obj); + u8 data = ioread8(subdev->mmio + addr); + nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data); + return data; +} + +static inline u16 +nv_rd16(void *obj, u32 addr) +{ + struct nouveau_subdev *subdev = nv_subdev(obj); + u16 data = ioread16_native(subdev->mmio + addr); + nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data); + return data; +} + +static inline u32 +nv_rd32(void *obj, u32 addr) +{ + struct nouveau_subdev *subdev = nv_subdev(obj); + u32 data = ioread32_native(subdev->mmio + addr); + nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data); + return data; +} + +static inline void +nv_wr08(void *obj, u32 addr, u8 data) +{ + struct nouveau_subdev *subdev = nv_subdev(obj); + nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data); + iowrite8(data, subdev->mmio + addr); +} + +static inline void +nv_wr16(void *obj, u32 addr, u16 data) +{ + struct nouveau_subdev *subdev = nv_subdev(obj); + nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data); + iowrite16_native(data, subdev->mmio + addr); +} + +static inline void +nv_wr32(void *obj, u32 addr, u32 data) +{ + struct nouveau_subdev *subdev = nv_subdev(obj); + nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data); + iowrite32_native(data, subdev->mmio + addr); +} + +static inline u32 +nv_mask(void *obj, u32 addr, u32 mask, u32 data) +{ + u32 temp = nv_rd32(obj, addr); + nv_wr32(obj, addr, (temp & ~mask) | data); + return temp; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h new file mode 100644 index 0000000..13ccdf5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h @@ -0,0 +1,8 @@ +#ifndef __NOUVEAU_BSP_H__ +#define __NOUVEAU_BSP_H__ + +extern struct nouveau_oclass nv84_bsp_oclass; +extern struct nouveau_oclass nvc0_bsp_oclass; +extern struct nouveau_oclass nve0_bsp_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h new file mode 100644 index 0000000..8cad2cf --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h @@ -0,0 +1,12 @@ +#ifndef __NOUVEAU_COPY_H__ +#define __NOUVEAU_COPY_H__ + +void nva3_copy_intr(struct nouveau_subdev *); + +extern struct nouveau_oclass nva3_copy_oclass; +extern struct nouveau_oclass nvc0_copy0_oclass; +extern struct nouveau_oclass nvc0_copy1_oclass; +extern struct nouveau_oclass nve0_copy0_oclass; +extern struct nouveau_oclass nve0_copy1_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h new file mode 100644 index 0000000..db97561 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h @@ -0,0 +1,7 @@ +#ifndef __NOUVEAU_CRYPT_H__ +#define __NOUVEAU_CRYPT_H__ + +extern struct nouveau_oclass nv84_crypt_oclass; +extern struct nouveau_oclass nv98_crypt_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/device.h b/drivers/gpu/drm/nouveau/core/include/engine/device.h new file mode 100644 index 0000000..b3dd2c4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/device.h @@ -0,0 +1,23 @@ +#ifndef __NOUVEAU_SUBDEV_DEVICE_H__ +#define __NOUVEAU_SUBDEV_DEVICE_H__ + +#include <core/device.h> + +#define nouveau_device_create(p,n,s,c,d,u) \ + nouveau_device_create_((p), 
(n), (s), (c), (d), sizeof(**u), (void **)u) + +int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname, + const char *cfg, const char *dbg, int, void **); + +int nv04_identify(struct nouveau_device *); +int nv10_identify(struct nouveau_device *); +int nv20_identify(struct nouveau_device *); +int nv30_identify(struct nouveau_device *); +int nv40_identify(struct nouveau_device *); +int nv50_identify(struct nouveau_device *); +int nvc0_identify(struct nouveau_device *); +int nve0_identify(struct nouveau_device *); + +struct nouveau_device *nouveau_device_find(u64 name); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h new file mode 100644 index 0000000..4b21fab --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h @@ -0,0 +1,49 @@ +#ifndef __NOUVEAU_DISP_H__ +#define __NOUVEAU_DISP_H__ + +#include <core/object.h> +#include <core/engine.h> +#include <core/device.h> +#include <core/event.h> + +struct nouveau_disp { + struct nouveau_engine base; + struct nouveau_event *vblank; +}; + +static inline struct nouveau_disp * +nouveau_disp(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP]; +} + +#define nouveau_disp_create(p,e,c,h,i,x,d) \ + nouveau_disp_create_((p), (e), (c), (h), (i), (x), \ + sizeof(**d), (void **)d) +#define nouveau_disp_destroy(d) ({ \ + struct nouveau_disp *disp = (d); \ + _nouveau_disp_dtor(nv_object(disp)); \ +}) +#define nouveau_disp_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_disp_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int heads, + const char *, const char *, int, void **); +void _nouveau_disp_dtor(struct nouveau_object *); +#define _nouveau_disp_init _nouveau_engine_init +#define _nouveau_disp_fini _nouveau_engine_fini + +extern struct nouveau_oclass nv04_disp_oclass; +extern struct nouveau_oclass nv50_disp_oclass; +extern struct nouveau_oclass nv84_disp_oclass; +extern struct nouveau_oclass nva0_disp_oclass; +extern struct nouveau_oclass nv94_disp_oclass; +extern struct nouveau_oclass nva3_disp_oclass; +extern struct nouveau_oclass nvd0_disp_oclass; +extern struct nouveau_oclass nve0_disp_oclass; +extern struct nouveau_oclass nvf0_disp_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h new file mode 100644 index 0000000..b28914e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h @@ -0,0 +1,48 @@ +#ifndef __NOUVEAU_DMAOBJ_H__ +#define __NOUVEAU_DMAOBJ_H__ + +#include <core/object.h> +#include <core/engine.h> + +struct nouveau_gpuobj; + +struct nouveau_dmaobj { + struct nouveau_object base; + u32 target; + u32 access; + u64 start; + u64 limit; + u32 conf0; +}; + +struct nouveau_dmaeng { + struct nouveau_engine base; + + /* creates a "physical" dma object from a struct nouveau_dmaobj */ + int (*bind)(struct nouveau_dmaeng *dmaeng, + struct nouveau_object *parent, + struct nouveau_dmaobj *dmaobj, + struct nouveau_gpuobj **); +}; + +#define nouveau_dmaeng_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d)) +#define nouveau_dmaeng_destroy(p) \ + nouveau_engine_destroy(&(p)->base) +#define nouveau_dmaeng_init(p) \ + nouveau_engine_init(&(p)->base) +#define nouveau_dmaeng_fini(p,s) \ + nouveau_engine_fini(&(p)->base, (s)) + +#define _nouveau_dmaeng_dtor _nouveau_engine_dtor 
+#define _nouveau_dmaeng_init _nouveau_engine_init +#define _nouveau_dmaeng_fini _nouveau_engine_fini + +extern struct nouveau_oclass nv04_dmaeng_oclass; +extern struct nouveau_oclass nv50_dmaeng_oclass; +extern struct nouveau_oclass nvc0_dmaeng_oclass; +extern struct nouveau_oclass nvd0_dmaeng_oclass; + +extern struct nouveau_oclass nouveau_dmaobj_sclass[]; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h new file mode 100644 index 0000000..633c2f8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h @@ -0,0 +1,116 @@ +#ifndef __NOUVEAU_FIFO_H__ +#define __NOUVEAU_FIFO_H__ + +#include <core/namedb.h> +#include <core/gpuobj.h> +#include <core/engine.h> + +struct nouveau_fifo_chan { + struct nouveau_namedb base; + struct nouveau_dmaobj *pushdma; + struct nouveau_gpuobj *pushgpu; + void __iomem *user; + u32 size; + u16 chid; + atomic_t refcnt; /* NV04_NVSW_SET_REF */ +}; + +static inline struct nouveau_fifo_chan * +nouveau_fifo_chan(void *obj) +{ + return (void *)nv_namedb(obj); +} + +#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \ + nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \ + (m), sizeof(**d), (void **)d) +#define nouveau_fifo_channel_init(p) \ + nouveau_namedb_init(&(p)->base) +#define nouveau_fifo_channel_fini(p,s) \ + nouveau_namedb_fini(&(p)->base, (s)) + +int nouveau_fifo_channel_create_(struct nouveau_object *, + struct nouveau_object *, + struct nouveau_oclass *, + int bar, u32 addr, u32 size, u32 push, + u64 engmask, int len, void **); +void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *); + +#define _nouveau_fifo_channel_init _nouveau_namedb_init +#define _nouveau_fifo_channel_fini _nouveau_namedb_fini + +void _nouveau_fifo_channel_dtor(struct nouveau_object *); +u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); +void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); + +struct nouveau_fifo_base { + struct nouveau_gpuobj base; +}; + +#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \ + nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d)) +#define nouveau_fifo_context_destroy(p) \ + nouveau_gpuobj_destroy(&(p)->base) +#define nouveau_fifo_context_init(p) \ + nouveau_gpuobj_init(&(p)->base) +#define nouveau_fifo_context_fini(p,s) \ + nouveau_gpuobj_fini(&(p)->base, (s)) + +#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor +#define _nouveau_fifo_context_init _nouveau_gpuobj_init +#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini +#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32 +#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32 + +struct nouveau_fifo { + struct nouveau_engine base; + + struct nouveau_event *cevent; /* channel creation event */ + struct nouveau_event *uevent; /* async user trigger */ + + struct nouveau_object **channel; + spinlock_t lock; + u16 min; + u16 max; + + int (*chid)(struct nouveau_fifo *, struct nouveau_object *); + void (*pause)(struct nouveau_fifo *, unsigned long *); + void (*start)(struct nouveau_fifo *, unsigned long *); +}; + +static inline struct nouveau_fifo * +nouveau_fifo(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO]; +} + +#define nouveau_fifo_create(o,e,c,fc,lc,d) \ + nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d) +#define nouveau_fifo_init(p) \ + nouveau_engine_init(&(p)->base) +#define nouveau_fifo_fini(p,s) \ + nouveau_engine_fini(&(p)->base, (s)) + +int nouveau_fifo_create_(struct 
nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int min, int max, + int size, void **); +void nouveau_fifo_destroy(struct nouveau_fifo *); +const char * +nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid); + +#define _nouveau_fifo_init _nouveau_engine_init +#define _nouveau_fifo_fini _nouveau_engine_fini + +extern struct nouveau_oclass nv04_fifo_oclass; +extern struct nouveau_oclass nv10_fifo_oclass; +extern struct nouveau_oclass nv17_fifo_oclass; +extern struct nouveau_oclass nv40_fifo_oclass; +extern struct nouveau_oclass nv50_fifo_oclass; +extern struct nouveau_oclass nv84_fifo_oclass; +extern struct nouveau_oclass nvc0_fifo_oclass; +extern struct nouveau_oclass nve0_fifo_oclass; + +void nv04_fifo_intr(struct nouveau_subdev *); +int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h new file mode 100644 index 0000000..5d39243 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h @@ -0,0 +1,76 @@ +#ifndef __NOUVEAU_GRAPH_H__ +#define __NOUVEAU_GRAPH_H__ + +#include <core/engine.h> +#include <core/engctx.h> +#include <core/enum.h> + +struct nouveau_graph_chan { + struct nouveau_engctx base; +}; + +#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_graph_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_graph_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_graph_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_graph_context_dtor _nouveau_engctx_dtor +#define _nouveau_graph_context_init _nouveau_engctx_init +#define _nouveau_graph_context_fini _nouveau_engctx_fini +#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_graph { + struct nouveau_engine base; + + /* Returns chipset-specific counts of units packed into an u64. 
+ */ + u64 (*units)(struct nouveau_graph *); +}; + +static inline struct nouveau_graph * +nouveau_graph(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR]; +} + +#define nouveau_graph_create(p,e,c,y,d) \ + nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d)) +#define nouveau_graph_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_graph_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_graph_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_graph_dtor _nouveau_engine_dtor +#define _nouveau_graph_init _nouveau_engine_init +#define _nouveau_graph_fini _nouveau_engine_fini + +extern struct nouveau_oclass nv04_graph_oclass; +extern struct nouveau_oclass nv10_graph_oclass; +extern struct nouveau_oclass nv20_graph_oclass; +extern struct nouveau_oclass nv25_graph_oclass; +extern struct nouveau_oclass nv2a_graph_oclass; +extern struct nouveau_oclass nv30_graph_oclass; +extern struct nouveau_oclass nv34_graph_oclass; +extern struct nouveau_oclass nv35_graph_oclass; +extern struct nouveau_oclass nv40_graph_oclass; +extern struct nouveau_oclass nv50_graph_oclass; +extern struct nouveau_oclass nvc0_graph_oclass; +extern struct nouveau_oclass nve0_graph_oclass; + +extern const struct nouveau_bitfield nv04_graph_nsource[]; +extern struct nouveau_ofuncs nv04_graph_ofuncs; +bool nv04_graph_idle(void *obj); + +extern const struct nouveau_bitfield nv10_graph_intr_name[]; +extern const struct nouveau_bitfield nv10_graph_nstatus[]; + +extern const struct nouveau_enum nv50_data_error_names[]; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h new file mode 100644 index 0000000..bbf0d4a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h @@ -0,0 +1,61 @@ +#ifndef __NOUVEAU_MPEG_H__ +#define __NOUVEAU_MPEG_H__ + +#include <core/engine.h> +#include <core/engctx.h> + +struct nouveau_mpeg_chan { + struct nouveau_engctx base; +}; + +#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \ + nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d)) +#define nouveau_mpeg_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_mpeg_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_mpeg_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor +#define _nouveau_mpeg_context_init _nouveau_engctx_init +#define _nouveau_mpeg_context_fini _nouveau_engctx_fini +#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32 +#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32 + +struct nouveau_mpeg { + struct nouveau_engine base; +}; + +#define nouveau_mpeg_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d)) +#define nouveau_mpeg_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_mpeg_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_mpeg_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_mpeg_dtor _nouveau_engine_dtor +#define _nouveau_mpeg_init _nouveau_engine_init +#define _nouveau_mpeg_fini _nouveau_engine_fini + +extern struct nouveau_oclass nv31_mpeg_oclass; +extern struct nouveau_oclass nv40_mpeg_oclass; +extern struct nouveau_oclass nv50_mpeg_oclass; +extern struct nouveau_oclass nv84_mpeg_oclass; + +extern struct nouveau_oclass nv31_mpeg_sclass[]; +void nv31_mpeg_intr(struct nouveau_subdev *); +void nv31_mpeg_tile_prog(struct nouveau_engine *, int); +int 
nv31_mpeg_init(struct nouveau_object *); + +extern struct nouveau_ofuncs nv50_mpeg_ofuncs; +int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +int nv50_mpeg_tlb_flush(struct nouveau_engine *); +void nv50_mpeg_intr(struct nouveau_subdev *); +int nv50_mpeg_init(struct nouveau_object *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h new file mode 100644 index 0000000..0a66781 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h @@ -0,0 +1,7 @@ +#ifndef __NOUVEAU_PPP_H__ +#define __NOUVEAU_PPP_H__ + +extern struct nouveau_oclass nv98_ppp_oclass; +extern struct nouveau_oclass nvc0_ppp_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h new file mode 100644 index 0000000..4579948 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h @@ -0,0 +1,60 @@ +#ifndef __NOUVEAU_SOFTWARE_H__ +#define __NOUVEAU_SOFTWARE_H__ + +#include <core/engine.h> +#include <core/engctx.h> +#include <core/event.h> + +struct nouveau_software_chan { + struct nouveau_engctx base; + + struct { + struct nouveau_eventh event; + u32 channel; + u32 ctxdma; + u64 offset; + u32 value; + } vblank; + + int (*flip)(void *); + void *flip_data; +}; + +#define nouveau_software_context_create(p,e,c,d) \ + nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d)) +#define nouveau_software_context_destroy(d) \ + nouveau_engctx_destroy(&(d)->base) +#define nouveau_software_context_init(d) \ + nouveau_engctx_init(&(d)->base) +#define nouveau_software_context_fini(d,s) \ + nouveau_engctx_fini(&(d)->base, (s)) + +#define _nouveau_software_context_dtor _nouveau_engctx_dtor +#define _nouveau_software_context_init _nouveau_engctx_init +#define _nouveau_software_context_fini _nouveau_engctx_fini + +struct nouveau_software { + struct nouveau_engine base; +}; + +#define nouveau_software_create(p,e,c,d) \ + nouveau_engine_create((p), (e), (c), true, "SW", "software", (d)) +#define nouveau_software_destroy(d) \ + nouveau_engine_destroy(&(d)->base) +#define nouveau_software_init(d) \ + nouveau_engine_init(&(d)->base) +#define nouveau_software_fini(d,s) \ + nouveau_engine_fini(&(d)->base, (s)) + +#define _nouveau_software_dtor _nouveau_engine_dtor +#define _nouveau_software_init _nouveau_engine_init +#define _nouveau_software_fini _nouveau_engine_fini + +extern struct nouveau_oclass nv04_software_oclass; +extern struct nouveau_oclass nv10_software_oclass; +extern struct nouveau_oclass nv50_software_oclass; +extern struct nouveau_oclass nvc0_software_oclass; + +void nv04_software_intr(struct nouveau_subdev *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h new file mode 100644 index 0000000..d7b287b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h @@ -0,0 +1,8 @@ +#ifndef __NOUVEAU_VP_H__ +#define __NOUVEAU_VP_H__ + +extern struct nouveau_oclass nv84_vp_oclass; +extern struct nouveau_oclass nvc0_vp_oclass; +extern struct nouveau_oclass nve0_vp_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h new file mode 100644 index 0000000..4f4ff45 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h @@ -0,0 +1,55 @@ +#ifndef __NOUVEAU_BAR_H__ +#define __NOUVEAU_BAR_H__ + 
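For illustration only (this sketch is not part of the commit): the engine headers above all follow the same pattern, where a static inline such as nouveau_graph() resolves the engine through nv_device(obj)->subdev[], and chipset-specific hooks like units() may be left unset. A minimal example of how a caller might use that lookup; the helper name and the nv_info() logging call are assumptions made for the sketch, not code taken from this patch.

static void
example_log_graph_units(struct nouveau_object *obj)
{
	/* resolve PGRAPH via the inline lookup declared in engine/graph.h */
	struct nouveau_graph *graph = nouveau_graph(obj);

	/* units() is optional and chipset-specific, so guard the call */
	if (graph && graph->units)
		nv_info(graph, "PGRAPH unit config: 0x%016llx\n",
			(unsigned long long)graph->units(graph));
}
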
+#include <core/subdev.h> +#include <core/device.h> + +#include <subdev/fb.h> + +struct nouveau_vma; + +struct nouveau_bar { + struct nouveau_subdev base; + + int (*alloc)(struct nouveau_bar *, struct nouveau_object *, + struct nouveau_mem *, struct nouveau_object **); + void __iomem *iomem; + + int (*kmap)(struct nouveau_bar *, struct nouveau_mem *, + u32 flags, struct nouveau_vma *); + int (*umap)(struct nouveau_bar *, struct nouveau_mem *, + u32 flags, struct nouveau_vma *); + void (*unmap)(struct nouveau_bar *, struct nouveau_vma *); + void (*flush)(struct nouveau_bar *); +}; + +static inline struct nouveau_bar * +nouveau_bar(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR]; +} + +#define nouveau_bar_create(p,e,o,d) \ + nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_bar_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_bar_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +void nouveau_bar_destroy(struct nouveau_bar *); + +void _nouveau_bar_dtor(struct nouveau_object *); +#define _nouveau_bar_init _nouveau_subdev_init +#define _nouveau_bar_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nv50_bar_oclass; +extern struct nouveau_oclass nvc0_bar_oclass; + +int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *, + struct nouveau_mem *, struct nouveau_object **); + +void nv84_bar_flush(struct nouveau_bar *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h new file mode 100644 index 0000000..5bd1ca8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h @@ -0,0 +1,35 @@ +#ifndef __NOUVEAU_BIOS_H__ +#define __NOUVEAU_BIOS_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_bios { + struct nouveau_subdev base; + u32 size; + u8 *data; + + u32 bmp_offset; + u32 bit_offset; + + struct { + u8 major; + u8 chip; + u8 minor; + u8 micro; + u8 patch; + } version; +}; + +static inline struct nouveau_bios * +nouveau_bios(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VBIOS]; +} + +u8 nvbios_checksum(const u8 *data, int size); +u16 nvbios_findstr(const u8 *data, int size, const char *str, int len); + +extern struct nouveau_oclass nouveau_bios_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h new file mode 100644 index 0000000..73f060b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h @@ -0,0 +1,13 @@ +#ifndef __NVBIOS_BIT_H__ +#define __NVBIOS_BIT_H__ + +struct bit_entry { + u8 id; + u8 version; + u16 length; + u16 offset; +}; + +int bit_entry(struct nouveau_bios *, u8 id, struct bit_entry *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h new file mode 100644 index 0000000..10e4dbc --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h @@ -0,0 +1,39 @@ +#ifndef __NVBIOS_BMP_H__ +#define __NVBIOS_BMP_H__ + +static inline u16 +bmp_version(struct nouveau_bios *bios) +{ + if (bios->bmp_offset) { + return nv_ro08(bios, bios->bmp_offset + 5) << 8 | + nv_ro08(bios, bios->bmp_offset + 6); + } + + return 0x0000; +} + +static inline u16 +bmp_mem_init_table(struct nouveau_bios *bios) +{ + if (bmp_version(bios) >= 0x0300) + return nv_ro16(bios, 
bios->bmp_offset + 24); + return 0x0000; +} + +static inline u16 +bmp_sdr_seq_table(struct nouveau_bios *bios) +{ + if (bmp_version(bios) >= 0x0300) + return nv_ro16(bios, bios->bmp_offset + 26); + return 0x0000; +} + +static inline u16 +bmp_ddr_seq_table(struct nouveau_bios *bios) +{ + if (bmp_version(bios) >= 0x0300) + return nv_ro16(bios, bios->bmp_offset + 28); + return 0x0000; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h new file mode 100644 index 0000000..c127054 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h @@ -0,0 +1,27 @@ +#ifndef __NVBIOS_CONN_H__ +#define __NVBIOS_CONN_H__ + +enum dcb_connector_type { + DCB_CONNECTOR_VGA = 0x00, + DCB_CONNECTOR_TV_0 = 0x10, + DCB_CONNECTOR_TV_1 = 0x11, + DCB_CONNECTOR_TV_3 = 0x13, + DCB_CONNECTOR_DVI_I = 0x30, + DCB_CONNECTOR_DVI_D = 0x31, + DCB_CONNECTOR_DMS59_0 = 0x38, + DCB_CONNECTOR_DMS59_1 = 0x39, + DCB_CONNECTOR_LVDS = 0x40, + DCB_CONNECTOR_LVDS_SPWG = 0x41, + DCB_CONNECTOR_DP = 0x46, + DCB_CONNECTOR_eDP = 0x47, + DCB_CONNECTOR_HDMI_0 = 0x60, + DCB_CONNECTOR_HDMI_1 = 0x61, + DCB_CONNECTOR_DMS59_DP0 = 0x64, + DCB_CONNECTOR_DMS59_DP1 = 0x65, + DCB_CONNECTOR_NONE = 0xff +}; + +u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h new file mode 100644 index 0000000..123270e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h @@ -0,0 +1,69 @@ +#ifndef __NVBIOS_DCB_H__ +#define __NVBIOS_DCB_H__ + +struct nouveau_bios; + +enum dcb_output_type { + DCB_OUTPUT_ANALOG = 0x0, + DCB_OUTPUT_TV = 0x1, + DCB_OUTPUT_TMDS = 0x2, + DCB_OUTPUT_LVDS = 0x3, + DCB_OUTPUT_DP = 0x6, + DCB_OUTPUT_EOL = 0xe, + DCB_OUTPUT_UNUSED = 0xf, + DCB_OUTPUT_ANY = -1, +}; + +struct dcb_output { + int index; /* may not be raw dcb index if merging has happened */ + u16 hasht; + u16 hashm; + enum dcb_output_type type; + uint8_t i2c_index; + uint8_t heads; + uint8_t connector; + uint8_t bus; + uint8_t location; + uint8_t or; + uint8_t link; + bool duallink_possible; + uint8_t extdev; + union { + struct sor_conf { + int link; + } sorconf; + struct { + int maxfreq; + } crtconf; + struct { + struct sor_conf sor; + bool use_straps_for_mode; + bool use_acpi_for_edid; + bool use_power_scripts; + } lvdsconf; + struct { + bool has_component_output; + } tvconf; + struct { + struct sor_conf sor; + int link_nr; + int link_bw; + } dpconf; + struct { + struct sor_conf sor; + int slave_addr; + } tmdsconf; + }; + bool i2c_upper_default; +}; + +u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len); +u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); +u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *, + struct dcb_output *); +u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *, + struct dcb_output *); +int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec) + (struct nouveau_bios *, void *, int index, u16 entry)); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h new file mode 100644 index 0000000..c35937e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h @@ -0,0 +1,48 @@ +#ifndef __NVBIOS_DISP_H__ +#define __NVBIOS_DISP_H__ + +u16 
nvbios_disp_table(struct nouveau_bios *, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub); + +struct nvbios_disp { + u16 data; +}; + +u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr__, u8 *sub); +u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr__, u8 *sub, + struct nvbios_disp *); + +struct nvbios_outp { + u16 type; + u16 mask; + u16 script[3]; +}; + +u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_outp *); +u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_outp *); + + +struct nvbios_ocfg { + u16 match; + u16 clkcmp[2]; +}; + +u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_ocfg *); +u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_ocfg *); +u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h new file mode 100644 index 0000000..6e54218 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h @@ -0,0 +1,34 @@ +#ifndef __NVBIOS_DP_H__ +#define __NVBIOS_DP_H__ + +struct nvbios_dpout { + u16 type; + u16 mask; + u8 flags; + u32 script[5]; + u32 lnkcmp; +}; + +u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpout *); +u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpout *); + +struct nvbios_dpcfg { + u8 drv; + u8 pre; + u8 unk; +}; + +u16 +nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpcfg *); +u16 +nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpcfg *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h new file mode 100644 index 0000000..949fee3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h @@ -0,0 +1,30 @@ +#ifndef __NVBIOS_EXTDEV_H__ +#define __NVBIOS_EXTDEV_H__ + +struct nouveau_bios; + +enum nvbios_extdev_type { + NVBIOS_EXTDEV_LM89 = 0x02, + NVBIOS_EXTDEV_VT1103M = 0x40, + NVBIOS_EXTDEV_PX3540 = 0x41, + NVBIOS_EXTDEV_VT1105M = 0x42, /* or close enough... 
*/ + NVBIOS_EXTDEV_ADT7473 = 0x70, /* can also be a LM64 */ + NVBIOS_EXTDEV_HDCP_EEPROM = 0x90, + NVBIOS_EXTDEV_NONE = 0xff, +}; + +struct nvbios_extdev_func { + u8 type; + u8 addr; + u8 bus; +}; + +int +nvbios_extdev_parse(struct nouveau_bios *, int, struct nvbios_extdev_func *); + +int +nvbios_extdev_find(struct nouveau_bios *, enum nvbios_extdev_type, + struct nvbios_extdev_func *); + + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h new file mode 100644 index 0000000..96d3364 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h @@ -0,0 +1,40 @@ +#ifndef __NVBIOS_GPIO_H__ +#define __NVBIOS_GPIO_H__ + +enum dcb_gpio_func_name { + DCB_GPIO_PANEL_POWER = 0x01, + DCB_GPIO_TVDAC0 = 0x0c, + DCB_GPIO_TVDAC1 = 0x2d, + DCB_GPIO_FAN = 0x09, + DCB_GPIO_FAN_SENSE = 0x3d, + DCB_GPIO_UNUSED = 0xff +}; + +#define DCB_GPIO_LOG_DIR 0x02 +#define DCB_GPIO_LOG_DIR_OUT 0x00 +#define DCB_GPIO_LOG_DIR_IN 0x02 +#define DCB_GPIO_LOG_VAL 0x01 +#define DCB_GPIO_LOG_VAL_LO 0x00 +#define DCB_GPIO_LOG_VAL_HI 0x01 + +struct dcb_gpio_func { + u8 func; + u8 line; + u8 log[2]; + + /* so far, "param" seems to only have an influence on PWM-related + * GPIOs such as FAN_CONTROL and PANEL_BACKLIGHT_LEVEL. + * if param equals 1, hardware PWM is available + * if param equals 0, the host should toggle the GPIO itself + */ + u8 param; +}; + +u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len); +u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len, + struct dcb_gpio_func *); +u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line, + u8 *ver, u8 *len, struct dcb_gpio_func *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h new file mode 100644 index 0000000..10b57a1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h @@ -0,0 +1,25 @@ +#ifndef __NVBIOS_I2C_H__ +#define __NVBIOS_I2C_H__ + +struct nouveau_bios; + +enum dcb_i2c_type { + DCB_I2C_NV04_BIT = 0, + DCB_I2C_NV4E_BIT = 4, + DCB_I2C_NVIO_BIT = 5, + DCB_I2C_NVIO_AUX = 6, + DCB_I2C_UNUSED = 0xff +}; + +struct dcb_i2c_entry { + enum dcb_i2c_type type; + u8 drive; + u8 sense; + u8 share; +}; + +u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 dcb_i2c_entry(struct nouveau_bios *, u8 index, u8 *ver, u8 *len); +int dcb_i2c_parse(struct nouveau_bios *, u8 index, struct dcb_i2c_entry *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h new file mode 100644 index 0000000..ca2f6bf --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h @@ -0,0 +1,22 @@ +#ifndef __NVBIOS_INIT_H__ +#define __NVBIOS_INIT_H__ + +struct nvbios_init { + struct nouveau_subdev *subdev; + struct nouveau_bios *bios; + u16 offset; + struct dcb_output *outp; + int crtc; + + /* internal state used during parsing */ + u8 execute; + u32 nested; + u16 repeat; + u16 repend; + u32 ramcfg; +}; + +int nvbios_exec(struct nvbios_init *); +int nvbios_init(struct nouveau_subdev *, bool execute); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h new file mode 100644 index 0000000..5572e60 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h @@ -0,0 +1,9 @@ +#ifndef __NVBIOS_MXM_H__ +#define __NVBIOS_MXM_H__ + +u16 mxm_table(struct nouveau_bios *, u8 *ver, u8 *hdr); + +u8 mxm_sor_map(struct nouveau_bios *, u8 conn); +u8 mxm_ddc_map(struct nouveau_bios *, u8 port); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h new file mode 100644 index 0000000..0b285e9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h @@ -0,0 +1,14 @@ +#ifndef __NVBIOS_PERF_H__ +#define __NVBIOS_PERF_H__ + +struct nouveau_bios; + +struct nvbios_perf_fan { + u32 pwm_divisor; +}; + +int +nvbios_perf_fan_parse(struct nouveau_bios *, struct nvbios_perf_fan *); + + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h new file mode 100644 index 0000000..b2f3d4d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h @@ -0,0 +1,79 @@ +#ifndef __NVBIOS_PLL_H__ +#define __NVBIOS_PLL_H__ + +/*XXX: kill me */ +struct nouveau_pll_vals { + union { + struct { +#ifdef __BIG_ENDIAN + uint8_t N1, M1, N2, M2; +#else + uint8_t M1, N1, M2, N2; +#endif + }; + struct { + uint16_t NM1, NM2; + } __attribute__((packed)); + }; + int log2P; + + int refclk; +}; + +struct nouveau_bios; + +/* these match types in pll limits table version 0x40, + * nouveau uses them on all chipsets internally where a + * specific pll needs to be referenced, but the exact + * register isn't known. + */ +enum nvbios_pll_type { + PLL_CORE = 0x01, + PLL_SHADER = 0x02, + PLL_UNK03 = 0x03, + PLL_MEMORY = 0x04, + PLL_VDEC = 0x05, + PLL_UNK40 = 0x40, + PLL_UNK41 = 0x41, + PLL_UNK42 = 0x42, + PLL_VPLL0 = 0x80, + PLL_VPLL1 = 0x81, + PLL_VPLL2 = 0x82, + PLL_VPLL3 = 0x83, + PLL_MAX = 0xff +}; + +struct nvbios_pll { + enum nvbios_pll_type type; + u32 reg; + u32 refclk; + + u8 min_p; + u8 max_p; + u8 bias_p; + + /* + * for most pre nv50 cards setting a log2P of 7 (the common max_log2p + * value) is no different to 6 (at least for vplls) so allowing the MNP + * calc to use 7 causes the generated clock to be out by a factor of 2. 
+ * however, max_log2p cannot be fixed-up during parsing as the + * unmodified max_log2p value is still needed for setting mplls, hence + * an additional max_usable_log2p member + */ + u8 max_p_usable; + + struct { + u32 min_freq; + u32 max_freq; + u32 min_inputfreq; + u32 max_inputfreq; + u8 min_m; + u8 max_m; + u8 min_n; + u8 max_n; + } vco1, vco2; +}; + +int nvbios_pll_parse(struct nouveau_bios *, u32 type, struct nvbios_pll *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h new file mode 100644 index 0000000..083541d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h @@ -0,0 +1,62 @@ +#ifndef __NVBIOS_THERM_H__ +#define __NVBIOS_THERM_H__ + +struct nouveau_bios; + +struct nvbios_therm_threshold { + u8 temp; + u8 hysteresis; +}; + +struct nvbios_therm_sensor { + /* diode */ + s16 slope_mult; + s16 slope_div; + s16 offset_num; + s16 offset_den; + s8 offset_constant; + + /* thresholds */ + struct nvbios_therm_threshold thrs_fan_boost; + struct nvbios_therm_threshold thrs_down_clock; + struct nvbios_therm_threshold thrs_critical; + struct nvbios_therm_threshold thrs_shutdown; +}; + +/* no vbios have more than 6 */ +#define NOUVEAU_TEMP_FAN_TRIP_MAX 10 +struct nouveau_therm_trip_point { + int fan_duty; + int temp; + int hysteresis; +}; + +struct nvbios_therm_fan { + u16 pwm_freq; + + u8 min_duty; + u8 max_duty; + + u16 bump_period; + u16 slow_down_period; + + struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX]; + u8 nr_fan_trip; + u8 linear_min_temp; + u8 linear_max_temp; +}; + +enum nvbios_therm_domain { + NVBIOS_THERM_DOMAIN_CORE, + NVBIOS_THERM_DOMAIN_AMBIENT, +}; + +int +nvbios_therm_sensor_parse(struct nouveau_bios *, enum nvbios_therm_domain, + struct nvbios_therm_sensor *); + +int +nvbios_therm_fan_parse(struct nouveau_bios *, struct nvbios_therm_fan *); + + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h new file mode 100644 index 0000000..360baab --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h @@ -0,0 +1,19 @@ +#ifndef __NVBIOS_XPIO_H__ +#define __NVBIOS_XPIO_H__ + +#define NVBIOS_XPIO_FLAG_AUX 0x10 +#define NVBIOS_XPIO_FLAG_AUX0 0x00 +#define NVBIOS_XPIO_FLAG_AUX1 0x10 + +struct nvbios_xpio { + u8 type; + u8 addr; + u8 flags; +}; + +u16 dcb_xpio_table(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len); +u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h new file mode 100644 index 0000000..7d88ec4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h @@ -0,0 +1,41 @@ +#ifndef __NOUVEAU_BUS_H__ +#define __NOUVEAU_BUS_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_bus_intr { + u32 stat; + u32 unit; +}; + +struct nouveau_bus { + struct nouveau_subdev base; +}; + +static inline struct nouveau_bus * +nouveau_bus(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS]; +} + +#define nouveau_bus_create(p, e, o, d) \ + nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master", \ + sizeof(**d), (void **)d) +#define nouveau_bus_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_bus_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_bus_fini(p, s) \ + 
nouveau_subdev_fini(&(p)->base, (s)) + +#define _nouveau_bus_dtor _nouveau_subdev_dtor +#define _nouveau_bus_init _nouveau_subdev_init +#define _nouveau_bus_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nv04_bus_oclass; +extern struct nouveau_oclass nv31_bus_oclass; +extern struct nouveau_oclass nv50_bus_oclass; +extern struct nouveau_oclass nvc0_bus_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h new file mode 100644 index 0000000..41b7a6a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h @@ -0,0 +1,60 @@ +#ifndef __NOUVEAU_CLOCK_H__ +#define __NOUVEAU_CLOCK_H__ + +#include <core/device.h> +#include <core/subdev.h> + +struct nouveau_pll_vals; +struct nvbios_pll; + +struct nouveau_clock { + struct nouveau_subdev base; + + int (*pll_set)(struct nouveau_clock *, u32 type, u32 freq); + + /*XXX: die, these are here *only* to support the completely + * bat-shit insane what-was-nouveau_hw.c code + */ + int (*pll_calc)(struct nouveau_clock *, struct nvbios_pll *, + int clk, struct nouveau_pll_vals *pv); + int (*pll_prog)(struct nouveau_clock *, u32 reg1, + struct nouveau_pll_vals *pv); +}; + +static inline struct nouveau_clock * +nouveau_clock(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK]; +} + +#define nouveau_clock_create(p,e,o,d) \ + nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d) +#define nouveau_clock_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_clock_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_clock_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, int, void **); + +#define _nouveau_clock_dtor _nouveau_subdev_dtor +#define _nouveau_clock_init _nouveau_subdev_init +#define _nouveau_clock_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nv04_clock_oclass; +extern struct nouveau_oclass nv40_clock_oclass; +extern struct nouveau_oclass nv50_clock_oclass; +extern struct nouveau_oclass nva3_clock_oclass; +extern struct nouveau_oclass nvc0_clock_oclass; + +int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); +int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, + int clk, struct nouveau_pll_vals *); +int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, + struct nouveau_pll_vals *); +int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, + int clk, struct nouveau_pll_vals *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h new file mode 100644 index 0000000..29e4cc1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h @@ -0,0 +1,40 @@ +#ifndef __NOUVEAU_DEVINIT_H__ +#define __NOUVEAU_DEVINIT_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_devinit { + struct nouveau_subdev base; + bool post; + void (*meminit)(struct nouveau_devinit *); +}; + +static inline struct nouveau_devinit * +nouveau_devinit(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT]; +} + +#define nouveau_devinit_create(p,e,o,d) \ + nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_devinit_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) + +int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +int 
nouveau_devinit_init(struct nouveau_devinit *); +int nouveau_devinit_fini(struct nouveau_devinit *, bool suspend); + +extern struct nouveau_oclass nv04_devinit_oclass; +extern struct nouveau_oclass nv05_devinit_oclass; +extern struct nouveau_oclass nv10_devinit_oclass; +extern struct nouveau_oclass nv1a_devinit_oclass; +extern struct nouveau_oclass nv20_devinit_oclass; +extern struct nouveau_oclass nv50_devinit_oclass; + +void nv04_devinit_dtor(struct nouveau_object *); +int nv04_devinit_init(struct nouveau_object *); +int nv04_devinit_fini(struct nouveau_object *, bool); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h new file mode 100644 index 0000000..da470e6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h @@ -0,0 +1,171 @@ +#ifndef __NOUVEAU_FB_H__ +#define __NOUVEAU_FB_H__ + +#include <core/subdev.h> +#include <core/device.h> +#include <core/mm.h> + +#include <subdev/vm.h> + +/* memory type/access flags, do not match hardware values */ +#define NV_MEM_ACCESS_RO 1 +#define NV_MEM_ACCESS_WO 2 +#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) +#define NV_MEM_ACCESS_SYS 4 +#define NV_MEM_ACCESS_VM 8 +#define NV_MEM_ACCESS_NOSNOOP 16 + +#define NV_MEM_TARGET_VRAM 0 +#define NV_MEM_TARGET_PCI 1 +#define NV_MEM_TARGET_PCI_NOSNOOP 2 +#define NV_MEM_TARGET_VM 3 +#define NV_MEM_TARGET_GART 4 + +#define NV_MEM_TYPE_VM 0x7f +#define NV_MEM_COMP_VM 0x03 + +struct nouveau_mem { + struct drm_device *dev; + + struct nouveau_vma bar_vma; + struct nouveau_vma vma[2]; + u8 page_shift; + + struct nouveau_mm_node *tag; + struct list_head regions; + dma_addr_t *pages; + u32 memtype; + u64 offset; + u64 size; + struct sg_table *sg; +}; + +struct nouveau_fb_tile { + struct nouveau_mm_node *tag; + u32 addr; + u32 limit; + u32 pitch; + u32 zcomp; +}; + +struct nouveau_fb { + struct nouveau_subdev base; + + bool (*memtype_valid)(struct nouveau_fb *, u32 memtype); + + struct { + enum { + NV_MEM_TYPE_UNKNOWN = 0, + NV_MEM_TYPE_STOLEN, + NV_MEM_TYPE_SGRAM, + NV_MEM_TYPE_SDRAM, + NV_MEM_TYPE_DDR1, + NV_MEM_TYPE_DDR2, + NV_MEM_TYPE_DDR3, + NV_MEM_TYPE_GDDR2, + NV_MEM_TYPE_GDDR3, + NV_MEM_TYPE_GDDR4, + NV_MEM_TYPE_GDDR5 + } type; + u64 stolen; + u64 size; + + int ranks; + int parts; + + int (*init)(struct nouveau_fb *); + int (*get)(struct nouveau_fb *, u64 size, u32 align, + u32 size_nc, u32 type, struct nouveau_mem **); + void (*put)(struct nouveau_fb *, struct nouveau_mem **); + } ram; + + struct nouveau_mm vram; + struct nouveau_mm tags; + + struct { + struct nouveau_fb_tile region[16]; + int regions; + void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size, + u32 pitch, u32 flags, struct nouveau_fb_tile *); + void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags, + struct nouveau_fb_tile *); + void (*fini)(struct nouveau_fb *, int i, + struct nouveau_fb_tile *); + void (*prog)(struct nouveau_fb *, int i, + struct nouveau_fb_tile *); + } tile; +}; + +static inline struct nouveau_fb * +nouveau_fb(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; +} + +#define nouveau_fb_create(p,e,c,d) \ + nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d)) +int nouveau_fb_preinit(struct nouveau_fb *); +void nouveau_fb_destroy(struct nouveau_fb *); +int nouveau_fb_init(struct nouveau_fb *); +#define nouveau_fb_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +void _nouveau_fb_dtor(struct nouveau_object *); +int _nouveau_fb_init(struct nouveau_object *); +#define _nouveau_fb_fini 
_nouveau_subdev_fini + +extern struct nouveau_oclass nv04_fb_oclass; +extern struct nouveau_oclass nv10_fb_oclass; +extern struct nouveau_oclass nv1a_fb_oclass; +extern struct nouveau_oclass nv20_fb_oclass; +extern struct nouveau_oclass nv25_fb_oclass; +extern struct nouveau_oclass nv30_fb_oclass; +extern struct nouveau_oclass nv35_fb_oclass; +extern struct nouveau_oclass nv36_fb_oclass; +extern struct nouveau_oclass nv40_fb_oclass; +extern struct nouveau_oclass nv41_fb_oclass; +extern struct nouveau_oclass nv44_fb_oclass; +extern struct nouveau_oclass nv46_fb_oclass; +extern struct nouveau_oclass nv47_fb_oclass; +extern struct nouveau_oclass nv49_fb_oclass; +extern struct nouveau_oclass nv4e_fb_oclass; +extern struct nouveau_oclass nv50_fb_oclass; +extern struct nouveau_oclass nvc0_fb_oclass; + +struct nouveau_bios; +int nouveau_fb_bios_memtype(struct nouveau_bios *); + +bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype); + +void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, + u32 pitch, u32 flags, struct nouveau_fb_tile *); +void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); +void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); + +int nv20_fb_vram_init(struct nouveau_fb *); +void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, + u32 pitch, u32 flags, struct nouveau_fb_tile *); +void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); +void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); + +int nv30_fb_init(struct nouveau_object *); +void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, + u32 pitch, u32 flags, struct nouveau_fb_tile *); + +void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags, + struct nouveau_fb_tile *); + +int nv41_fb_vram_init(struct nouveau_fb *); +int nv41_fb_init(struct nouveau_object *); +void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); + +int nv44_fb_vram_init(struct nouveau_fb *); +int nv44_fb_init(struct nouveau_object *); +void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); + +void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, + u32 pitch, u32 flags, struct nouveau_fb_tile *); + +void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h new file mode 100644 index 0000000..c85b9f1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h @@ -0,0 +1,53 @@ +#ifndef __NOUVEAU_GPIO_H__ +#define __NOUVEAU_GPIO_H__ + +#include <core/subdev.h> +#include <core/device.h> +#include <core/event.h> + +#include <subdev/bios.h> +#include <subdev/bios/gpio.h> + +struct nouveau_gpio { + struct nouveau_subdev base; + + struct nouveau_event *events; + + /* hardware interfaces */ + void (*reset)(struct nouveau_gpio *, u8 func); + int (*drive)(struct nouveau_gpio *, int line, int dir, int out); + int (*sense)(struct nouveau_gpio *, int line); + + /* software interfaces */ + int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line, + struct dcb_gpio_func *); + int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state); + int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line); +}; + +static inline struct nouveau_gpio * +nouveau_gpio(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO]; +} + +#define nouveau_gpio_create(p,e,o,l,d) \ + 
nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d) +#define nouveau_gpio_destroy(p) ({ \ + struct nouveau_gpio *gpio = (p); \ + _nouveau_gpio_dtor(nv_object(gpio)); \ +}) +#define nouveau_gpio_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, int, void **); +void _nouveau_gpio_dtor(struct nouveau_object *); +int nouveau_gpio_init(struct nouveau_gpio *); + +extern struct nouveau_oclass nv10_gpio_oclass; +extern struct nouveau_oclass nv50_gpio_oclass; +extern struct nouveau_oclass nvd0_gpio_oclass; +extern struct nouveau_oclass nve0_gpio_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h new file mode 100644 index 0000000..888384c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h @@ -0,0 +1,151 @@ +#ifndef __NOUVEAU_I2C_H__ +#define __NOUVEAU_I2C_H__ + +#include <core/subdev.h> +#include <core/device.h> + +#include <subdev/bios.h> +#include <subdev/bios/i2c.h> + +#define NV_I2C_PORT(n) (0x00 + (n)) +#define NV_I2C_DEFAULT(n) (0x80 + (n)) + +#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n)) +#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8) +#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8) + +struct nouveau_i2c_port { + struct nouveau_object base; + struct i2c_adapter adapter; + + struct list_head head; + u8 index; + + const struct nouveau_i2c_func *func; +}; + +struct nouveau_i2c_func { + void (*acquire)(struct nouveau_i2c_port *); + void (*release)(struct nouveau_i2c_port *); + + void (*drive_scl)(struct nouveau_i2c_port *, int); + void (*drive_sda)(struct nouveau_i2c_port *, int); + int (*sense_scl)(struct nouveau_i2c_port *); + int (*sense_sda)(struct nouveau_i2c_port *); + + int (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8); + int (*pattern)(struct nouveau_i2c_port *, int pattern); + int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh); + int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe); +}; + +#define nouveau_i2c_port_create(p,e,o,i,a,d) \ + nouveau_i2c_port_create_((p), (e), (o), (i), (a), \ + sizeof(**d), (void **)d) +#define nouveau_i2c_port_destroy(p) ({ \ + struct nouveau_i2c_port *port = (p); \ + _nouveau_i2c_port_dtor(nv_object(i2c)); \ +}) +#define nouveau_i2c_port_init(p) \ + nouveau_object_init(&(p)->base) +#define nouveau_i2c_port_fini(p,s) \ + nouveau_object_fini(&(p)->base, (s)) + +int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, u8, + const struct i2c_algorithm *, int, void **); +void _nouveau_i2c_port_dtor(struct nouveau_object *); +#define _nouveau_i2c_port_init nouveau_object_init +#define _nouveau_i2c_port_fini nouveau_object_fini + +struct nouveau_i2c { + struct nouveau_subdev base; + + struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); + struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); + int (*identify)(struct nouveau_i2c *, int index, + const char *what, struct i2c_board_info *, + bool (*match)(struct nouveau_i2c_port *, + struct i2c_board_info *)); + struct list_head ports; +}; + +static inline struct nouveau_i2c * +nouveau_i2c(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C]; +} + +#define nouveau_i2c_create(p,e,o,s,d) \ + nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d) +#define nouveau_i2c_destroy(p) ({ \ + struct nouveau_i2c *i2c = (p); \ + 
_nouveau_i2c_dtor(nv_object(i2c)); \ +}) +#define nouveau_i2c_init(p) ({ \ + struct nouveau_i2c *i2c = (p); \ + _nouveau_i2c_init(nv_object(i2c)); \ +}) +#define nouveau_i2c_fini(p,s) ({ \ + struct nouveau_i2c *i2c = (p); \ + _nouveau_i2c_fini(nv_object(i2c), (s)); \ +}) + +int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, struct nouveau_oclass *, + int, void **); +void _nouveau_i2c_dtor(struct nouveau_object *); +int _nouveau_i2c_init(struct nouveau_object *); +int _nouveau_i2c_fini(struct nouveau_object *, bool); + +extern struct nouveau_oclass nv04_i2c_oclass; +extern struct nouveau_oclass nv4e_i2c_oclass; +extern struct nouveau_oclass nv50_i2c_oclass; +extern struct nouveau_oclass nv94_i2c_oclass; +extern struct nouveau_oclass nvd0_i2c_oclass; +extern struct nouveau_oclass nouveau_anx9805_sclass[]; + +extern const struct i2c_algorithm nouveau_i2c_bit_algo; +extern const struct i2c_algorithm nouveau_i2c_aux_algo; + +static inline int +nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg) +{ + u8 val; + struct i2c_msg msgs[] = { + { .addr = addr, .flags = 0, .len = 1, .buf = &reg }, + { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val }, + }; + + int ret = i2c_transfer(&port->adapter, msgs, 2); + if (ret != 2) + return -EIO; + + return val; +} + +static inline int +nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val) +{ + u8 buf[2] = { reg, val }; + struct i2c_msg msgs[] = { + { .addr = addr, .flags = 0, .len = 2, .buf = buf }, + }; + + int ret = i2c_transfer(&port->adapter, msgs, 1); + if (ret != 1) + return -EIO; + + return 0; +} + +static inline bool +nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr) +{ + return nv_rdi2cr(port, addr, 0) >= 0; +} + +int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size); +int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h new file mode 100644 index 0000000..88814f1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h @@ -0,0 +1,34 @@ +#ifndef __NOUVEAU_IBUS_H__ +#define __NOUVEAU_IBUS_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_ibus { + struct nouveau_subdev base; +}; + +static inline struct nouveau_ibus * +nouveau_ibus(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_IBUS]; +} + +#define nouveau_ibus_create(p,e,o,d) \ + nouveau_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \ + sizeof(**d), (void **)d) +#define nouveau_ibus_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_ibus_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_ibus_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +#define _nouveau_ibus_dtor _nouveau_subdev_dtor +#define _nouveau_ibus_init _nouveau_subdev_init +#define _nouveau_ibus_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nvc0_ibus_oclass; +extern struct nouveau_oclass nve0_ibus_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h new file mode 100644 index 0000000..7ab36af --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h @@ -0,0 +1,73 @@ +#ifndef __NOUVEAU_INSTMEM_H__ +#define __NOUVEAU_INSTMEM_H__ + +#include <core/subdev.h> +#include <core/device.h> +#include <core/mm.h> + +struct nouveau_instobj { + struct nouveau_object base; + struct list_head head; + u32 *suspend; + u64 
addr; + u32 size; +}; + +static inline struct nouveau_instobj * +nv_memobj(void *obj) +{ +#if CPTCFG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA + if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS))) + nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj)); +#endif + return obj; +} + +#define nouveau_instobj_create(p,e,o,d) \ + nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_instobj_init(p) \ + nouveau_object_init(&(p)->base) +#define nouveau_instobj_fini(p,s) \ + nouveau_object_fini(&(p)->base, (s)) + +int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +void nouveau_instobj_destroy(struct nouveau_instobj *); + +void _nouveau_instobj_dtor(struct nouveau_object *); +#define _nouveau_instobj_init nouveau_object_init +#define _nouveau_instobj_fini nouveau_object_fini + +struct nouveau_instmem { + struct nouveau_subdev base; + struct list_head list; + + u32 reserved; + int (*alloc)(struct nouveau_instmem *, struct nouveau_object *, + u32 size, u32 align, struct nouveau_object **); +}; + +static inline struct nouveau_instmem * +nouveau_instmem(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; +} + +#define nouveau_instmem_create(p,e,o,d) \ + nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_instmem_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +int nouveau_instmem_init(struct nouveau_instmem *); +int nouveau_instmem_fini(struct nouveau_instmem *, bool); + +#define _nouveau_instmem_dtor _nouveau_subdev_dtor +int _nouveau_instmem_init(struct nouveau_object *); +int _nouveau_instmem_fini(struct nouveau_object *, bool); + +extern struct nouveau_oclass nv04_instmem_oclass; +extern struct nouveau_oclass nv40_instmem_oclass; +extern struct nouveau_oclass nv50_instmem_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h new file mode 100644 index 0000000..a1985ed --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h @@ -0,0 +1,40 @@ +#ifndef __NOUVEAU_LTCG_H__ +#define __NOUVEAU_LTCG_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_mm_node; + +struct nouveau_ltcg { + struct nouveau_subdev base; + + int (*tags_alloc)(struct nouveau_ltcg *, u32 count, + struct nouveau_mm_node **); + void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **); + void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count); +}; + +static inline struct nouveau_ltcg * +nouveau_ltcg(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG]; +} + +#define nouveau_ltcg_create(p,e,o,d) \ + nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \ + sizeof(**d), (void **)d) +#define nouveau_ltcg_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_ltcg_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_ltcg_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +#define _nouveau_ltcg_dtor _nouveau_subdev_dtor +#define _nouveau_ltcg_init _nouveau_subdev_init +#define _nouveau_ltcg_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nvc0_ltcg_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h new file mode 100644 index 0000000..9d2cd20 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h @@ 
-0,0 +1,52 @@ +#ifndef __NOUVEAU_MC_H__ +#define __NOUVEAU_MC_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_mc_intr { + u32 stat; + u32 unit; +}; + +struct nouveau_mc { + struct nouveau_subdev base; + const struct nouveau_mc_intr *intr_map; +}; + +static inline struct nouveau_mc * +nouveau_mc(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; +} + +#define nouveau_mc_create(p,e,o,m,d) \ + nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) +#define nouveau_mc_destroy(p) ({ \ + struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ +}) +#define nouveau_mc_init(p) ({ \ + struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \ +}) +#define nouveau_mc_fini(p,s) ({ \ + struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \ +}) + +int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, const struct nouveau_mc_intr *, + int, void **); +void _nouveau_mc_dtor(struct nouveau_object *); +int _nouveau_mc_init(struct nouveau_object *); +int _nouveau_mc_fini(struct nouveau_object *, bool); + +extern struct nouveau_oclass nv04_mc_oclass; +extern struct nouveau_oclass nv44_mc_oclass; +extern struct nouveau_oclass nv50_mc_oclass; +extern struct nouveau_oclass nv98_mc_oclass; +extern struct nouveau_oclass nvc0_mc_oclass; + +extern const struct nouveau_mc_intr nv04_mc_intr[]; +int nv04_mc_init(struct nouveau_object *); +int nv50_mc_init(struct nouveau_object *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h new file mode 100644 index 0000000..b93b152 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h @@ -0,0 +1,37 @@ +#ifndef __NOUVEAU_MXM_H__ +#define __NOUVEAU_MXM_H__ + +#include <core/subdev.h> +#include <core/device.h> + +#define MXM_SANITISE_DCB 0x00000001 + +struct nouveau_mxm { + struct nouveau_subdev base; + u32 action; + u8 *mxms; +}; + +static inline struct nouveau_mxm * +nouveau_mxm(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MXM]; +} + +#define nouveau_mxm_create(p,e,o,d) \ + nouveau_mxm_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_mxm_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_mxm_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) +int nouveau_mxm_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +void nouveau_mxm_destroy(struct nouveau_mxm *); + +#define _nouveau_mxm_dtor _nouveau_subdev_dtor +#define _nouveau_mxm_init _nouveau_subdev_init +#define _nouveau_mxm_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nv50_mxm_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h new file mode 100644 index 0000000..c075998 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h @@ -0,0 +1,80 @@ +#ifndef __NOUVEAU_THERM_H__ +#define __NOUVEAU_THERM_H__ + +#include <core/device.h> +#include <core/subdev.h> + +enum nouveau_therm_fan_mode { + NOUVEAU_THERM_CTRL_NONE = 0, + NOUVEAU_THERM_CTRL_MANUAL = 1, + NOUVEAU_THERM_CTRL_AUTO = 2, +}; + +enum nouveau_therm_attr_type { + NOUVEAU_THERM_ATTR_FAN_MIN_DUTY = 0, + NOUVEAU_THERM_ATTR_FAN_MAX_DUTY = 1, + NOUVEAU_THERM_ATTR_FAN_MODE = 2, + + NOUVEAU_THERM_ATTR_THRS_FAN_BOOST = 10, + NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST = 11, + NOUVEAU_THERM_ATTR_THRS_DOWN_CLK = 12, + NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST = 
13, + NOUVEAU_THERM_ATTR_THRS_CRITICAL = 14, + NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST = 15, + NOUVEAU_THERM_ATTR_THRS_SHUTDOWN = 16, + NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST = 17, +}; + +struct nouveau_therm { + struct nouveau_subdev base; + + int (*pwm_ctrl)(struct nouveau_therm *, int line, bool); + int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *); + int (*pwm_set)(struct nouveau_therm *, int line, u32, u32); + int (*pwm_clock)(struct nouveau_therm *); + + int (*fan_get)(struct nouveau_therm *); + int (*fan_set)(struct nouveau_therm *, int); + int (*fan_sense)(struct nouveau_therm *); + + int (*temp_get)(struct nouveau_therm *); + + int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type); + int (*attr_set)(struct nouveau_therm *, + enum nouveau_therm_attr_type, int); +}; + +static inline struct nouveau_therm * +nouveau_therm(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_THERM]; +} + +#define nouveau_therm_create(p,e,o,d) \ + nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_therm_destroy(p) ({ \ + struct nouveau_therm *therm = (p); \ + _nouveau_therm_dtor(nv_object(therm)); \ +}) +#define nouveau_therm_init(p) ({ \ + struct nouveau_therm *therm = (p); \ + _nouveau_therm_init(nv_object(therm)); \ +}) +#define nouveau_therm_fini(p,s) ({ \ + struct nouveau_therm *therm = (p); \ + _nouveau_therm_init(nv_object(therm), (s)); \ +}) + +int nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +void _nouveau_therm_dtor(struct nouveau_object *); +int _nouveau_therm_init(struct nouveau_object *); +int _nouveau_therm_fini(struct nouveau_object *, bool); + +extern struct nouveau_oclass nv40_therm_oclass; +extern struct nouveau_oclass nv50_therm_oclass; +extern struct nouveau_oclass nv84_therm_oclass; +extern struct nouveau_oclass nva3_therm_oclass; +extern struct nouveau_oclass nvd0_therm_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h new file mode 100644 index 0000000..e465d15 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h @@ -0,0 +1,61 @@ +#ifndef __NOUVEAU_TIMER_H__ +#define __NOUVEAU_TIMER_H__ + +#include <core/subdev.h> +#include <core/device.h> + +struct nouveau_alarm { + struct list_head head; + u64 timestamp; + void (*func)(struct nouveau_alarm *); +}; + +static inline void +nouveau_alarm_init(struct nouveau_alarm *alarm, + void (*func)(struct nouveau_alarm *)) +{ + INIT_LIST_HEAD(&alarm->head); + alarm->func = func; +} + +bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data); +bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data); +bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data); +void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *); + +#define NV_WAIT_DEFAULT 2000000000ULL +#define nv_wait(o,a,m,v) \ + nouveau_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v)) +#define nv_wait_ne(o,a,m,v) \ + nouveau_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v)) +#define nv_wait_cb(o,c,d) \ + nouveau_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d)) + +struct nouveau_timer { + struct nouveau_subdev base; + u64 (*read)(struct nouveau_timer *); + void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *); +}; + +static inline struct nouveau_timer * +nouveau_timer(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_TIMER]; +} + +#define 
nouveau_timer_create(p,e,o,d) \ + nouveau_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \ + sizeof(**d), (void **)d) +#define nouveau_timer_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_timer_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_timer_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *, + struct nouveau_oclass *, int size, void **); + +extern struct nouveau_oclass nv04_timer_oclass; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vga.h b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h new file mode 100644 index 0000000..fee09ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h @@ -0,0 +1,30 @@ +#ifndef __NOUVEAU_VGA_H__ +#define __NOUVEAU_VGA_H__ + +#include <core/os.h> + +/* access to various legacy io ports */ +u8 nv_rdport(void *obj, int head, u16 port); +void nv_wrport(void *obj, int head, u16 port, u8 value); + +/* VGA Sequencer */ +u8 nv_rdvgas(void *obj, int head, u8 index); +void nv_wrvgas(void *obj, int head, u8 index, u8 value); + +/* VGA Graphics */ +u8 nv_rdvgag(void *obj, int head, u8 index); +void nv_wrvgag(void *obj, int head, u8 index, u8 value); + +/* VGA CRTC */ +u8 nv_rdvgac(void *obj, int head, u8 index); +void nv_wrvgac(void *obj, int head, u8 index, u8 value); + +/* VGA indexed port access dispatcher */ +u8 nv_rdvgai(void *obj, int head, u16 port, u8 index); +void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value); + +bool nv_lockvgac(void *obj, bool lock); +u8 nv_rdvgaowner(void *obj); +void nv_wrvgaowner(void *obj, u8); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h new file mode 100644 index 0000000..9d595ef --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h @@ -0,0 +1,142 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_VM_H__ +#define __NOUVEAU_VM_H__ + +#include <core/object.h> +#include <core/subdev.h> +#include <core/device.h> +#include <core/mm.h> + +struct nouveau_vm_pgt { + struct nouveau_gpuobj *obj[2]; + u32 refcount[2]; +}; + +struct nouveau_vm_pgd { + struct list_head head; + struct nouveau_gpuobj *obj; +}; + +struct nouveau_gpuobj; +struct nouveau_mem; + +struct nouveau_vma { + struct list_head head; + int refcount; + struct nouveau_vm *vm; + struct nouveau_mm_node *node; + u64 offset; + u32 access; +}; + +struct nouveau_vm { + struct nouveau_vmmgr *vmm; + struct nouveau_mm mm; + int refcount; + + struct list_head pgd_list; + atomic_t engref[64]; //NVDEV_SUBDEV_NR]; + + struct nouveau_vm_pgt *pgt; + u32 fpde; + u32 lpde; +}; + +struct nouveau_vmmgr { + struct nouveau_subdev base; + + u64 limit; + u8 dma_bits; + u32 pgt_bits; + u8 spg_shift; + u8 lpg_shift; + + int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **); + + void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]); + void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_mem *, u32 pte, u32 cnt, + u64 phys, u64 delta); + void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *); + void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); + void (*flush)(struct nouveau_vm *); +}; + +static inline struct nouveau_vmmgr * +nouveau_vmmgr(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM]; +} + +#define nouveau_vmmgr_create(p,e,o,i,f,d) \ + nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d)) +#define nouveau_vmmgr_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_vmmgr_init(p) \ + nouveau_subdev_init(&(p)->base) +#define nouveau_vmmgr_fini(p,s) \ + nouveau_subdev_fini(&(p)->base, (s)) + +#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor +#define _nouveau_vmmgr_init _nouveau_subdev_init +#define _nouveau_vmmgr_fini _nouveau_subdev_fini + +extern struct nouveau_oclass nv04_vmmgr_oclass; +extern struct nouveau_oclass nv41_vmmgr_oclass; +extern struct nouveau_oclass nv44_vmmgr_oclass; +extern struct nouveau_oclass nv50_vmmgr_oclass; +extern struct nouveau_oclass nvc0_vmmgr_oclass; + +int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64, + struct nouveau_vm **); +void nv04_vmmgr_dtor(struct nouveau_object *); + +void nv50_vm_flush_engine(struct nouveau_subdev *, int engine); +void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type); + +/* nouveau_vm.c */ +int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length, + u64 mm_offset, u32 block, struct nouveau_vm **); +int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **); +int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, + struct nouveau_gpuobj *pgd); +int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, + u32 access, struct nouveau_vma *); +void nouveau_vm_put(struct nouveau_vma *); +void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *); +void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *); +void nouveau_vm_unmap(struct nouveau_vma *); +void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); +void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, + struct nouveau_mem *); +void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, + struct nouveau_mem *mem); + 
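For illustration only (not part of the commit): the address-space interface declared above is normally used as a get/map pair balanced by an unmap/put pair. A minimal sketch follows, assuming "size" is the mapping size in bytes, read/write access, and that the page shift is taken from the nouveau_fb allocation being mapped; the helper names are made up for the example.

static int
example_vma_attach(struct nouveau_vm *vm, struct nouveau_mem *mem,
		   u64 size, struct nouveau_vma *vma)
{
	int ret;

	/* reserve a range of GPU virtual address space */
	ret = nouveau_vm_get(vm, size, mem->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	/* write the page tables for the whole allocation */
	nouveau_vm_map(vma, mem);
	return 0;
}

static void
example_vma_detach(struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);	/* clear the PTEs */
	nouveau_vm_put(vma);	/* release the address range */
}
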
+#endif diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h new file mode 100644 index 0000000..3bd9be2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/os.h @@ -0,0 +1,49 @@ +#ifndef __NOUVEAU_OS_H__ +#define __NOUVEAU_OS_H__ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/printk.h> +#include <linux/bitops.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> +#include <linux/delay.h> +#include <linux/io-mapping.h> +#include <linux/vmalloc.h> +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/reboot.h> +#include <linux/interrupt.h> + +#include <asm/unaligned.h> + +static inline int +ffsll(u64 mask) +{ + int i; + for (i = 0; i < 64; i++) { + if (mask & (1ULL << i)) + return i + 1; + } + return 0; +} + +#ifndef ioread32_native +#ifdef __BIG_ENDIAN +#define ioread16_native ioread16be +#define iowrite16_native iowrite16be +#define ioread32_native ioread32be +#define iowrite32_native iowrite32be +#else /* def __BIG_ENDIAN */ +#define ioread16_native ioread16 +#define iowrite16_native iowrite16 +#define ioread32_native ioread32 +#define iowrite32_native iowrite32 +#endif /* def __BIG_ENDIAN else */ +#endif /* !ioread32_native */ + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c new file mode 100644 index 0000000..d70ba34 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c @@ -0,0 +1,135 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <subdev/bar.h> + +struct nouveau_barobj { + struct nouveau_object base; + struct nouveau_vma vma; + void __iomem *iomem; +}; + +static int +nouveau_barobj_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *mem, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar = (void *)engine; + struct nouveau_barobj *barobj; + int ret; + + ret = nouveau_object_create(parent, engine, oclass, 0, &barobj); + *pobject = nv_object(barobj); + if (ret) + return ret; + + ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma); + if (ret) + return ret; + + barobj->iomem = bar->iomem + (u32)barobj->vma.offset; + return 0; +} + +static void +nouveau_barobj_dtor(struct nouveau_object *object) +{ + struct nouveau_bar *bar = (void *)object->engine; + struct nouveau_barobj *barobj = (void *)object; + if (barobj->vma.node) + bar->unmap(bar, &barobj->vma); + nouveau_object_destroy(&barobj->base); +} + +static u32 +nouveau_barobj_rd32(struct nouveau_object *object, u64 addr) +{ + struct nouveau_barobj *barobj = (void *)object; + return ioread32_native(barobj->iomem + addr); +} + +static void +nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nouveau_barobj *barobj = (void *)object; + iowrite32_native(data, barobj->iomem + addr); +} + +static struct nouveau_oclass +nouveau_barobj_oclass = { + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nouveau_barobj_ctor, + .dtor = nouveau_barobj_dtor, + .init = nouveau_object_init, + .fini = nouveau_object_fini, + .rd32 = nouveau_barobj_rd32, + .wr32 = nouveau_barobj_wr32, + }, +}; + +int +nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent, + struct nouveau_mem *mem, struct nouveau_object **pobject) +{ + struct nouveau_object *engine = nv_object(bar); + return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass, + mem, 0, pobject); +} + +int +nouveau_bar_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int length, void **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_bar *bar; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL", + "bar", length, pobject); + bar = *pobject; + if (ret) + return ret; + + bar->iomem = ioremap(pci_resource_start(device->pdev, 3), + pci_resource_len(device->pdev, 3)); + return 0; +} + +void +nouveau_bar_destroy(struct nouveau_bar *bar) +{ + if (bar->iomem) + iounmap(bar->iomem); + nouveau_subdev_destroy(&bar->base); +} + +void +_nouveau_bar_dtor(struct nouveau_object *object) +{ + struct nouveau_bar *bar = (void *)object; + nouveau_bar_destroy(bar); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c new file mode 100644 index 0000000..649f1ce --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c @@ -0,0 +1,265 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> +#include <subdev/fb.h> +#include <subdev/vm.h> + +struct nv50_bar_priv { + struct nouveau_bar base; + spinlock_t lock; + struct nouveau_gpuobj *mem; + struct nouveau_gpuobj *pad; + struct nouveau_gpuobj *pgd; + struct nouveau_vm *bar1_vm; + struct nouveau_gpuobj *bar1; + struct nouveau_vm *bar3_vm; + struct nouveau_gpuobj *bar3; +}; + +static int +nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem, + u32 flags, struct nouveau_vma *vma) +{ + struct nv50_bar_priv *priv = (void *)bar; + int ret; + + ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma); + if (ret) + return ret; + + nouveau_vm_map(vma, mem); + nv50_vm_flush_engine(nv_subdev(bar), 6); + return 0; +} + +static int +nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem, + u32 flags, struct nouveau_vma *vma) +{ + struct nv50_bar_priv *priv = (void *)bar; + int ret; + + ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma); + if (ret) + return ret; + + nouveau_vm_map(vma, mem); + nv50_vm_flush_engine(nv_subdev(bar), 6); + return 0; +} + +static void +nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma) +{ + nouveau_vm_unmap(vma); + nv50_vm_flush_engine(nv_subdev(bar), 6); + nouveau_vm_put(vma); +} + +static void +nv50_bar_flush(struct nouveau_bar *bar) +{ + struct nv50_bar_priv *priv = (void *)bar; + unsigned long flags; + spin_lock_irqsave(&priv->lock, flags); + nv_wr32(priv, 0x00330c, 0x00000001); + if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000)) + nv_warn(priv, "flush timeout\n"); + spin_unlock_irqrestore(&priv->lock, flags); +} + +void +nv84_bar_flush(struct nouveau_bar *bar) +{ + struct nv50_bar_priv *priv = (void *)bar; + unsigned long flags; + spin_lock_irqsave(&priv->lock, flags); + nv_wr32(bar, 0x070000, 0x00000001); + if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000)) + nv_warn(priv, "flush timeout\n"); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int +nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_object *heap; + struct nouveau_vm *vm; + struct nv50_bar_priv *priv; + u64 start, limit; + int ret; + + ret = nouveau_bar_create(parent, engine, oclass, &priv); + 
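	/*
	 * Annotation (not from the original patch): the remainder of this
	 * constructor allocates a small heap object, a padding object and a
	 * page directory, builds two address spaces -- one covering the BAR3
	 * aperture (used by ->kmap for kernel access to GPU objects) and one
	 * covering the BAR1 aperture (used by ->umap for user-visible
	 * mappings) -- and writes the two DMA objects (priv->bar3, priv->bar1)
	 * that nv50_bar_init() later programs into 0x00170c/0x001708, before
	 * hooking up the alloc/kmap/umap/unmap/flush methods.
	 */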
*pobject = nv_object(priv); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0, + NVOBJ_FLAG_HEAP, &priv->mem); + heap = nv_object(priv->mem); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), heap, + (device->chipset == 0x50) ? 0x1400 : 0x0200, + 0, 0, &priv->pad); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0, + 0, &priv->pgd); + if (ret) + return ret; + + /* BAR3 */ + start = 0x0100000000ULL; + limit = start + pci_resource_len(device->pdev, 3); + + ret = nouveau_vm_new(device, start, limit, start, &vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), heap, + ((limit-- - start) >> 12) * 8, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]); + vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd); + nouveau_vm_ref(NULL, &vm, NULL); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3); + if (ret) + return ret; + + nv_wo32(priv->bar3, 0x00, 0x7fc00000); + nv_wo32(priv->bar3, 0x04, lower_32_bits(limit)); + nv_wo32(priv->bar3, 0x08, lower_32_bits(start)); + nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 | + upper_32_bits(start)); + nv_wo32(priv->bar3, 0x10, 0x00000000); + nv_wo32(priv->bar3, 0x14, 0x00000000); + + /* BAR1 */ + start = 0x0000000000ULL; + limit = start + pci_resource_len(device->pdev, 1); + + ret = nouveau_vm_new(device, start, limit--, start, &vm); + if (ret) + return ret; + + ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd); + nouveau_vm_ref(NULL, &vm, NULL); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1); + if (ret) + return ret; + + nv_wo32(priv->bar1, 0x00, 0x7fc00000); + nv_wo32(priv->bar1, 0x04, lower_32_bits(limit)); + nv_wo32(priv->bar1, 0x08, lower_32_bits(start)); + nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 | + upper_32_bits(start)); + nv_wo32(priv->bar1, 0x10, 0x00000000); + nv_wo32(priv->bar1, 0x14, 0x00000000); + + priv->base.alloc = nouveau_bar_alloc; + priv->base.kmap = nv50_bar_kmap; + priv->base.umap = nv50_bar_umap; + priv->base.unmap = nv50_bar_unmap; + if (device->chipset == 0x50) + priv->base.flush = nv50_bar_flush; + else + priv->base.flush = nv84_bar_flush; + spin_lock_init(&priv->lock); + return 0; +} + +static void +nv50_bar_dtor(struct nouveau_object *object) +{ + struct nv50_bar_priv *priv = (void *)object; + nouveau_gpuobj_ref(NULL, &priv->bar1); + nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd); + nouveau_gpuobj_ref(NULL, &priv->bar3); + if (priv->bar3_vm) { + nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]); + nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd); + } + nouveau_gpuobj_ref(NULL, &priv->pgd); + nouveau_gpuobj_ref(NULL, &priv->pad); + nouveau_gpuobj_ref(NULL, &priv->mem); + nouveau_bar_destroy(&priv->base); +} + +static int +nv50_bar_init(struct nouveau_object *object) +{ + struct nv50_bar_priv *priv = (void *)object; + int ret; + + ret = nouveau_bar_init(&priv->base); + if (ret) + return ret; + + nv_mask(priv, 0x000200, 0x00000100, 0x00000000); + nv_mask(priv, 0x000200, 0x00000100, 0x00000100); + nv50_vm_flush_engine(nv_subdev(priv), 6); + + nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12); + nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12); + nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4); + nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4); + return 0; +} + +static int 
+nv50_bar_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_bar_priv *priv = (void *)object; + return nouveau_bar_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv50_bar_oclass = { + .handle = NV_SUBDEV(BAR, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_bar_ctor, + .dtor = nv50_bar_dtor, + .init = nv50_bar_init, + .fini = nv50_bar_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c new file mode 100644 index 0000000..f8a4495 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c @@ -0,0 +1,219 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> +#include <subdev/fb.h> +#include <subdev/vm.h> + +struct nvc0_bar_priv { + struct nouveau_bar base; + spinlock_t lock; + struct { + struct nouveau_gpuobj *mem; + struct nouveau_gpuobj *pgd; + struct nouveau_vm *vm; + } bar[2]; +}; + +static int +nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem, + u32 flags, struct nouveau_vma *vma) +{ + struct nvc0_bar_priv *priv = (void *)bar; + int ret; + + ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma); + if (ret) + return ret; + + nouveau_vm_map(vma, mem); + nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5); + return 0; +} + +static int +nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem, + u32 flags, struct nouveau_vma *vma) +{ + struct nvc0_bar_priv *priv = (void *)bar; + int ret; + + ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12, + mem->page_shift, flags, vma); + if (ret) + return ret; + + nouveau_vm_map(vma, mem); + nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5); + return 0; +} + +static void +nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma) +{ + struct nvc0_bar_priv *priv = (void *)bar; + int i = !(vma->vm == priv->bar[0].vm); + + nouveau_vm_unmap(vma); + nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5); + nouveau_vm_put(vma); +} + +static int +nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct pci_dev *pdev = device->pdev; + struct nvc0_bar_priv *priv; + struct nouveau_gpuobj *mem; + struct nouveau_vm *vm; + int 
ret; + + ret = nouveau_bar_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + /* BAR3 */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0, + &priv->bar[0].mem); + mem = priv->bar[0].mem; + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0, + &priv->bar[0].pgd); + if (ret) + return ret; + + ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (pci_resource_len(pdev, 3) >> 12) * 8, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC, + &vm->pgt[0].obj[0]); + vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd); + nouveau_vm_ref(NULL, &vm, NULL); + if (ret) + return ret; + + nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr)); + nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr)); + nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1)); + nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1)); + + /* BAR1 */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0, + &priv->bar[1].mem); + mem = priv->bar[1].mem; + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0, + &priv->bar[1].pgd); + if (ret) + return ret; + + ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm); + if (ret) + return ret; + + ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd); + nouveau_vm_ref(NULL, &vm, NULL); + if (ret) + return ret; + + nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr)); + nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr)); + nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1)); + nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1)); + + priv->base.alloc = nouveau_bar_alloc; + priv->base.kmap = nvc0_bar_kmap; + priv->base.umap = nvc0_bar_umap; + priv->base.unmap = nvc0_bar_unmap; + priv->base.flush = nv84_bar_flush; + spin_lock_init(&priv->lock); + return 0; +} + +static void +nvc0_bar_dtor(struct nouveau_object *object) +{ + struct nvc0_bar_priv *priv = (void *)object; + + nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd); + nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd); + nouveau_gpuobj_ref(NULL, &priv->bar[1].mem); + + if (priv->bar[0].vm) { + nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]); + nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd); + } + nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd); + nouveau_gpuobj_ref(NULL, &priv->bar[0].mem); + + nouveau_bar_destroy(&priv->base); +} + +static int +nvc0_bar_init(struct nouveau_object *object) +{ + struct nvc0_bar_priv *priv = (void *)object; + int ret; + + ret = nouveau_bar_init(&priv->base); + if (ret) + return ret; + + nv_mask(priv, 0x000200, 0x00000100, 0x00000000); + nv_mask(priv, 0x000200, 0x00000100, 0x00000100); + nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); + + nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12); + nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12); + return 0; +} + +struct nouveau_oclass +nvc0_bar_oclass = { + .handle = NV_SUBDEV(BAR, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_bar_ctor, + .dtor = nvc0_bar_dtor, + .init = nvc0_bar_init, + .fini = _nouveau_bar_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c new file mode 100644 index 0000000..0e2c1a4 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -0,0 +1,520 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/device.h> +#include <core/subdev.h> +#include <core/option.h> + +#include <subdev/bios.h> +#include <subdev/bios/bmp.h> +#include <subdev/bios/bit.h> + +u8 +nvbios_checksum(const u8 *data, int size) +{ + u8 sum = 0; + while (size--) + sum += *data++; + return sum; +} + +u16 +nvbios_findstr(const u8 *data, int size, const char *str, int len) +{ + int i, j; + + for (i = 0; i <= (size - len); i++) { + for (j = 0; j < len; j++) + if ((char)data[i + j] != str[j]) + break; + if (j == len) + return i; + } + + return 0; +} + +#if defined(__powerpc__) +static void +nouveau_bios_shadow_of(struct nouveau_bios *bios) +{ + struct pci_dev *pdev = nv_device(bios)->pdev; + struct device_node *dn; + const u32 *data; + int size; + + dn = pci_device_to_OF_node(pdev); + if (!dn) { + nv_info(bios, "Unable to get the OF node\n"); + return; + } + + data = of_get_property(dn, "NVDA,BMP", &size); + if (data && size) { + bios->size = size; + bios->data = kmalloc(bios->size, GFP_KERNEL); + if (bios->data) + memcpy(bios->data, data, size); + } +} +#endif + +static void +nouveau_bios_shadow_pramin(struct nouveau_bios *bios) +{ + struct nouveau_device *device = nv_device(bios); + u32 bar0 = 0; + int i; + + if (device->card_type >= NV_50) { + u64 addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8; + if (!addr) { + addr = (u64)nv_rd32(bios, 0x001700) << 16; + addr += 0xf0000; + } + + bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16); + } + + /* bail if no rom signature */ + if (nv_rd08(bios, 0x700000) != 0x55 || + nv_rd08(bios, 0x700001) != 0xaa) + goto out; + + bios->size = nv_rd08(bios, 0x700002) * 512; + if (!bios->size) + goto out; + + bios->data = kmalloc(bios->size, GFP_KERNEL); + if (bios->data) { + for (i = 0; i < bios->size; i++) + nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i)); + } + +out: + if (device->card_type >= NV_50) + nv_wr32(bios, 0x001700, bar0); +} + +static void +nouveau_bios_shadow_prom(struct nouveau_bios *bios) +{ + struct nouveau_device *device = nv_device(bios); + u32 pcireg, access; + u16 pcir; + int i; + + /* enable access to rom */ + if (device->card_type >= NV_50) + pcireg = 0x088050; + else + pcireg = 0x001850; + access = nv_mask(bios, pcireg, 0x00000001, 0x00000000); + + /* bail if no rom signature, with a workaround for a PROM 
reading + * issue on some chipsets. the first read after a period of + * inactivity returns the wrong result, so retry the first header + * byte a few times before giving up as a workaround + */ + i = 16; + do { + if (nv_rd08(bios, 0x300000) == 0x55) + break; + } while (i--); + + if (!i || nv_rd08(bios, 0x300001) != 0xaa) + goto out; + + /* additional check (see note below) - read PCI record header */ + pcir = nv_rd08(bios, 0x300018) | + nv_rd08(bios, 0x300019) << 8; + if (nv_rd08(bios, 0x300000 + pcir) != 'P' || + nv_rd08(bios, 0x300001 + pcir) != 'C' || + nv_rd08(bios, 0x300002 + pcir) != 'I' || + nv_rd08(bios, 0x300003 + pcir) != 'R') + goto out; + + /* read entire bios image to system memory */ + bios->size = nv_rd08(bios, 0x300002) * 512; + if (!bios->size) + goto out; + + bios->data = kmalloc(bios->size, GFP_KERNEL); + if (bios->data) { + for (i = 0; i < bios->size; i++) + nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i)); + } + +out: + /* disable access to rom */ + nv_wr32(bios, pcireg, access); +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) +int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); +bool nouveau_acpi_rom_supported(struct pci_dev *pdev); +#else +static inline bool +nouveau_acpi_rom_supported(struct pci_dev *pdev) { + return false; +} + +static inline int +nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { + return -EINVAL; +} +#endif + +static void +nouveau_bios_shadow_acpi(struct nouveau_bios *bios) +{ + struct pci_dev *pdev = nv_device(bios)->pdev; + int ret, cnt, i; + + if (!nouveau_acpi_rom_supported(pdev)) { + bios->data = NULL; + return; + } + + bios->size = 0; + bios->data = kmalloc(4096, GFP_KERNEL); + if (bios->data) { + if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096) + bios->size = bios->data[2] * 512; + kfree(bios->data); + } + + if (!bios->size) + return; + + bios->data = kmalloc(bios->size, GFP_KERNEL); + if (bios->data) { + /* disobey the acpi spec - much faster on at least w530 ... */ + ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size); + if (ret != bios->size || + nvbios_checksum(bios->data, bios->size)) { + /* ... that didn't work, ok, i'll be good now */ + for (i = 0; i < bios->size; i += cnt) { + cnt = min((bios->size - i), (u32)4096); + ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt); + if (ret != cnt) + break; + } + } + } +} + +static void +nouveau_bios_shadow_pci(struct nouveau_bios *bios) +{ + struct pci_dev *pdev = nv_device(bios)->pdev; + size_t size; + + if (!pci_enable_rom(pdev)) { + void __iomem *rom = pci_map_rom(pdev, &size); + if (rom && size) { + bios->data = kmalloc(size, GFP_KERNEL); + if (bios->data) { + memcpy_fromio(bios->data, rom, size); + bios->size = size; + } + } + if (rom) + pci_unmap_rom(pdev, rom); + + pci_disable_rom(pdev); + } +} + +static void +nouveau_bios_shadow_platform(struct nouveau_bios *bios) +{ + struct pci_dev *pdev = nv_device(bios)->pdev; + size_t size; + + void __iomem *rom = pci_platform_rom(pdev, &size); + if (rom && size) { + bios->data = kmalloc(size, GFP_KERNEL); + if (bios->data) { + memcpy_fromio(bios->data, rom, size); + bios->size = size; + } + } +} + +static int +nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) +{ + if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 || + bios->data[1] != 0xAA) { + nv_info(bios, "... signature not found\n"); + return 0; + } + + if (nvbios_checksum(bios->data, + min_t(u32, bios->data[2] * 512, bios->size))) { + nv_info(bios, "... 
checksum invalid\n"); + /* if a ro image is somewhat bad, it's probably all rubbish */ + return writeable ? 2 : 1; + } + + nv_info(bios, "... appears to be valid\n"); + return 3; +} + +struct methods { + const char desc[16]; + void (*shadow)(struct nouveau_bios *); + const bool rw; + int score; + u32 size; + u8 *data; +}; + +static int +nouveau_bios_shadow(struct nouveau_bios *bios) +{ + struct methods shadow_methods[] = { +#if defined(__powerpc__) + { "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL }, +#endif + { "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL }, + { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, + { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, + { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, + { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, + {} + }; + struct methods *mthd, *best; + const struct firmware *fw; + const char *optarg; + int optlen, ret; + char *source; + + optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen); + source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL; + if (source) { + /* try to match one of the built-in methods */ + mthd = shadow_methods; + do { + if (strcasecmp(source, mthd->desc)) + continue; + nv_info(bios, "source: %s\n", mthd->desc); + + mthd->shadow(bios); + mthd->score = nouveau_bios_score(bios, mthd->rw); + if (mthd->score) { + kfree(source); + return 0; + } + } while ((++mthd)->shadow); + + /* attempt to load firmware image */ + ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev); + if (ret == 0) { + bios->size = fw->size; + bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL); + release_firmware(fw); + + nv_info(bios, "image: %s\n", source); + if (nouveau_bios_score(bios, 1)) { + kfree(source); + return 0; + } + + kfree(bios->data); + bios->data = NULL; + } + + nv_error(bios, "source \'%s\' invalid\n", source); + kfree(source); + } + + mthd = shadow_methods; + do { + nv_info(bios, "checking %s for image...\n", mthd->desc); + mthd->shadow(bios); + mthd->score = nouveau_bios_score(bios, mthd->rw); + mthd->size = bios->size; + mthd->data = bios->data; + bios->data = NULL; + } while (mthd->score != 3 && (++mthd)->shadow); + + mthd = shadow_methods; + best = mthd; + do { + if (mthd->score > best->score) { + kfree(best->data); + best = mthd; + } + } while ((++mthd)->shadow); + + if (best->score) { + nv_info(bios, "using image from %s\n", best->desc); + bios->size = best->size; + bios->data = best->data; + return 0; + } + + nv_error(bios, "unable to locate usable image\n"); + return -EINVAL; +} + +static u8 +nouveau_bios_rd08(struct nouveau_object *object, u64 addr) +{ + struct nouveau_bios *bios = (void *)object; + return bios->data[addr]; +} + +static u16 +nouveau_bios_rd16(struct nouveau_object *object, u64 addr) +{ + struct nouveau_bios *bios = (void *)object; + return get_unaligned_le16(&bios->data[addr]); +} + +static u32 +nouveau_bios_rd32(struct nouveau_object *object, u64 addr) +{ + struct nouveau_bios *bios = (void *)object; + return get_unaligned_le32(&bios->data[addr]); +} + +static void +nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data) +{ + struct nouveau_bios *bios = (void *)object; + bios->data[addr] = data; +} + +static void +nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data) +{ + struct nouveau_bios *bios = (void *)object; + put_unaligned_le16(data, &bios->data[addr]); +} + +static void +nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nouveau_bios *bios = (void 
*)object; + put_unaligned_le32(data, &bios->data[addr]); +} + +static int +nouveau_bios_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bios *bios; + struct bit_entry bit_i; + int ret; + + ret = nouveau_subdev_create(parent, engine, oclass, 0, + "VBIOS", "bios", &bios); + *pobject = nv_object(bios); + if (ret) + return ret; + + ret = nouveau_bios_shadow(bios); + if (ret) + return ret; + + /* detect type of vbios we're dealing with */ + bios->bmp_offset = nvbios_findstr(bios->data, bios->size, + "\xff\x7f""NV\0", 5); + if (bios->bmp_offset) { + nv_info(bios, "BMP version %x.%x\n", + bmp_version(bios) >> 8, + bmp_version(bios) & 0xff); + } + + bios->bit_offset = nvbios_findstr(bios->data, bios->size, + "\xff\xb8""BIT", 5); + if (bios->bit_offset) + nv_info(bios, "BIT signature found\n"); + + /* determine the vbios version number */ + if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) { + bios->version.major = nv_ro08(bios, bit_i.offset + 3); + bios->version.chip = nv_ro08(bios, bit_i.offset + 2); + bios->version.minor = nv_ro08(bios, bit_i.offset + 1); + bios->version.micro = nv_ro08(bios, bit_i.offset + 0); + bios->version.patch = nv_ro08(bios, bit_i.offset + 4); + } else + if (bmp_version(bios)) { + bios->version.major = nv_ro08(bios, bios->bmp_offset + 13); + bios->version.chip = nv_ro08(bios, bios->bmp_offset + 12); + bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11); + bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10); + } + + nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n", + bios->version.major, bios->version.chip, + bios->version.minor, bios->version.micro, bios->version.patch); + + return 0; +} + +static void +nouveau_bios_dtor(struct nouveau_object *object) +{ + struct nouveau_bios *bios = (void *)object; + kfree(bios->data); + nouveau_subdev_destroy(&bios->base); +} + +static int +nouveau_bios_init(struct nouveau_object *object) +{ + struct nouveau_bios *bios = (void *)object; + return nouveau_subdev_init(&bios->base); +} + +static int +nouveau_bios_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_bios *bios = (void *)object; + return nouveau_subdev_fini(&bios->base, suspend); +} + +struct nouveau_oclass +nouveau_bios_oclass = { + .handle = NV_SUBDEV(VBIOS, 0x00), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nouveau_bios_ctor, + .dtor = nouveau_bios_dtor, + .init = nouveau_bios_init, + .fini = nouveau_bios_fini, + .rd08 = nouveau_bios_rd08, + .rd16 = nouveau_bios_rd16, + .rd32 = nouveau_bios_rd32, + .wr08 = nouveau_bios_wr08, + .wr16 = nouveau_bios_wr16, + .wr32 = nouveau_bios_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c new file mode 100644 index 0000000..1d03a3f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c @@ -0,0 +1,52 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "core/object.h" + +#include "subdev/bios.h" +#include "subdev/bios/bit.h" + +int +bit_entry(struct nouveau_bios *bios, u8 id, struct bit_entry *bit) +{ + if (likely(bios->bit_offset)) { + u8 entries = nv_ro08(bios, bios->bit_offset + 10); + u32 entry = bios->bit_offset + 12; + while (entries--) { + if (nv_ro08(bios, entry + 0) == id) { + bit->id = nv_ro08(bios, entry + 0); + bit->version = nv_ro08(bios, entry + 1); + bit->length = nv_ro16(bios, entry + 2); + bit->offset = nv_ro16(bios, entry + 4); + return 0; + } + + entry += nv_ro08(bios, bios->bit_offset + 9); + } + + return -ENOENT; + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c new file mode 100644 index 0000000..5ac010e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c @@ -0,0 +1,56 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/device.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/conn.h> + +u16 +dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u16 dcb = dcb_table(bios, ver, hdr, cnt, len); + if (dcb && *ver >= 0x30 && *hdr >= 0x16) { + u16 data = nv_ro16(bios, dcb + 0x14); + if (data) { + *ver = nv_ro08(bios, data + 0); + *hdr = nv_ro08(bios, data + 1); + *cnt = nv_ro08(bios, data + 2); + *len = nv_ro08(bios, data + 3); + return data; + } + } + return 0x0000; +} + +u16 +dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) +{ + u8 hdr, cnt; + u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len); + if (data && idx < cnt) + return data + hdr + (idx * *len); + return 0x0000; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c new file mode 100644 index 0000000..2d9b9d7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -0,0 +1,204 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "core/device.h" + +#include "subdev/bios.h" +#include "subdev/bios/dcb.h" + +u16 +dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + struct nouveau_device *device = nv_device(bios); + u16 dcb = 0x0000; + + if (device->card_type > NV_04) + dcb = nv_ro16(bios, 0x36); + if (!dcb) { + nv_warn(bios, "DCB table not found\n"); + return dcb; + } + + *ver = nv_ro08(bios, dcb); + + if (*ver >= 0x41) { + nv_warn(bios, "DCB version 0x%02x unknown\n", *ver); + return 0x0000; + } else + if (*ver >= 0x30) { + if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) { + *hdr = nv_ro08(bios, dcb + 1); + *cnt = nv_ro08(bios, dcb + 2); + *len = nv_ro08(bios, dcb + 3); + return dcb; + } + } else + if (*ver >= 0x20) { + if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) { + u16 i2c = nv_ro16(bios, dcb + 2); + *hdr = 8; + *cnt = (i2c - dcb) / 8; + *len = 8; + return dcb; + } + } else + if (*ver >= 0x15) { + if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) { + u16 i2c = nv_ro16(bios, dcb + 2); + *hdr = 4; + *cnt = (i2c - dcb) / 10; + *len = 10; + return dcb; + } + } else { + /* + * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but + * always has the same single (crt) entry, even when tv-out + * present, so the conclusion is this version cannot really + * be used. 
+ * + * v1.2 tables (some NV6/10, and NV15+) normally have the + * same 5 entries, which are not specific to the card and so + * no use. + * + * v1.2 does have an I2C table that read_dcb_i2c_table can + * handle, but cards exist (nv11 in #14821) with a bad i2c + * table pointer, so use the indices parsed in + * parse_bmp_structure. + * + * v1.1 (NV5+, maybe some NV4) is entirely unhelpful + */ + nv_warn(bios, "DCB contains no useful data\n"); + return 0x0000; + } + + nv_warn(bios, "DCB header validation failed\n"); + return 0x0000; +} + +u16 +dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) +{ + u8 hdr, cnt; + u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len); + if (dcb && idx < cnt) + return dcb + hdr + (idx * *len); + return 0x0000; +} + +static inline u16 +dcb_outp_hasht(struct dcb_output *outp) +{ + return (outp->extdev << 8) | (outp->location << 4) | outp->type; +} + +static inline u16 +dcb_outp_hashm(struct dcb_output *outp) +{ + return (outp->heads << 8) | (outp->link << 6) | outp->or; +} + +u16 +dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, + struct dcb_output *outp) +{ + u16 dcb = dcb_outp(bios, idx, ver, len); + if (dcb) { + if (*ver >= 0x20) { + u32 conn = nv_ro32(bios, dcb + 0x00); + outp->or = (conn & 0x0f000000) >> 24; + outp->location = (conn & 0x00300000) >> 20; + outp->bus = (conn & 0x000f0000) >> 16; + outp->connector = (conn & 0x0000f000) >> 12; + outp->heads = (conn & 0x00000f00) >> 8; + outp->i2c_index = (conn & 0x000000f0) >> 4; + outp->type = (conn & 0x0000000f); + outp->link = 0; + } else { + dcb = 0x0000; + } + + if (*ver >= 0x40) { + u32 conf = nv_ro32(bios, dcb + 0x04); + switch (outp->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + outp->link = (conf & 0x00000030) >> 4; + outp->sorconf.link = outp->link; /*XXX*/ + outp->extdev = 0x00; + if (outp->location != 0) + outp->extdev = (conf & 0x0000ff00) >> 8; + break; + default: + break; + } + } + + outp->hasht = dcb_outp_hasht(outp); + outp->hashm = dcb_outp_hashm(outp); + } + return dcb; +} + +u16 +dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, + u8 *ver, u8 *len, struct dcb_output *outp) +{ + u16 dcb, idx = 0; + while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) { + if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) { + if ((dcb_outp_hashm(outp) & mask) == mask) + break; + } + } + return dcb; +} + +int +dcb_outp_foreach(struct nouveau_bios *bios, void *data, + int (*exec)(struct nouveau_bios *, void *, int, u16)) +{ + int ret, idx = -1; + u8 ver, len; + u16 outp; + + while ((outp = dcb_outp(bios, ++idx, &ver, &len))) { + if (nv_ro32(bios, outp) == 0x00000000) + break; /* seen on an NV11 with DCB v1.5 */ + if (nv_ro32(bios, outp) == 0xffffffff) + break; /* seen on an NV17 with DCB v2.0 */ + + if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED) + continue; + if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL) + break; + + ret = exec(bios, data, idx, outp); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c new file mode 100644 index 0000000..7f16e52 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c @@ -0,0 +1,178 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bios/bit.h> +#include <subdev/bios/disp.h> + +u16 +nvbios_disp_table(struct nouveau_bios *bios, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub) +{ + struct bit_entry U; + + if (!bit_entry(bios, 'U', &U)) { + if (U.version == 1) { + u16 data = nv_ro16(bios, U.offset); + if (data) { + *ver = nv_ro08(bios, data + 0x00); + switch (*ver) { + case 0x20: + case 0x21: + *hdr = nv_ro08(bios, data + 0x01); + *len = nv_ro08(bios, data + 0x02); + *cnt = nv_ro08(bios, data + 0x03); + *sub = nv_ro08(bios, data + 0x04); + return data; + default: + break; + } + } + } + } + + return 0x0000; +} + +u16 +nvbios_disp_entry(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *len, u8 *sub) +{ + u8 hdr, cnt; + u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub); + if (data && idx < cnt) + return data + hdr + (idx * *len); + *ver = 0x00; + return 0x0000; +} + +u16 +nvbios_disp_parse(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *len, u8 *sub, + struct nvbios_disp *info) +{ + u16 data = nvbios_disp_entry(bios, idx, ver, len, sub); + if (data && *len >= 2) { + info->data = nv_ro16(bios, data + 0); + return data; + } + return 0x0000; +} + +u16 +nvbios_outp_entry(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + struct nvbios_disp info; + u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info); + if (data) { + *cnt = nv_ro08(bios, info.data + 0x05); + *len = 0x06; + data = info.data; + } + return data; +} + +u16 +nvbios_outp_parse(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_outp *info) +{ + u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len); + if (data && *hdr >= 0x0a) { + info->type = nv_ro16(bios, data + 0x00); + info->mask = nv_ro32(bios, data + 0x02); + if (*ver <= 0x20) /* match any link */ + info->mask |= 0x00c0; + info->script[0] = nv_ro16(bios, data + 0x06); + info->script[1] = nv_ro16(bios, data + 0x08); + info->script[2] = 0x0000; + if (*hdr >= 0x0c) + info->script[2] = nv_ro16(bios, data + 0x0a); + return data; + } + return 0x0000; +} + +u16 +nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_outp *info) +{ + u16 data, idx = 0; + while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) { + if (data && info->type == type) { + if ((info->mask & mask) == mask) + break; 
+ } + } + return data; +} + +u16 +nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + if (idx < *cnt) + return outp + *hdr + (idx * *len); + return 0x0000; +} + +u16 +nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_ocfg *info) +{ + u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); + if (data) { + info->match = nv_ro16(bios, data + 0x00); + info->clkcmp[0] = nv_ro16(bios, data + 0x02); + info->clkcmp[1] = nv_ro16(bios, data + 0x04); + } + return data; +} + +u16 +nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_ocfg *info) +{ + u16 data, idx = 0; + while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { + if (info->match == type) + break; + } + return data; +} + +u16 +nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz) +{ + while (cmp) { + if (khz / 10 >= nv_ro16(bios, cmp + 0x00)) + return nv_ro16(bios, cmp + 0x02); + cmp += 0x04; + } + return 0x0000; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c new file mode 100644 index 0000000..663853b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c @@ -0,0 +1,204 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + + +#include "subdev/bios.h" +#include "subdev/bios/bit.h" +#include "subdev/bios/dp.h" + +static u16 +nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + struct bit_entry d; + + if (!bit_entry(bios, 'd', &d)) { + if (d.version == 1 && d.length >= 2) { + u16 data = nv_ro16(bios, d.offset); + if (data) { + *ver = nv_ro08(bios, data + 0x00); + switch (*ver) { + case 0x21: + case 0x30: + case 0x40: + *hdr = nv_ro08(bios, data + 0x01); + *len = nv_ro08(bios, data + 0x02); + *cnt = nv_ro08(bios, data + 0x03); + return data; + default: + break; + } + } + } + } + + return 0x0000; +} + +static u16 +nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len); + if (data && idx < *cnt) { + u16 outp = nv_ro16(bios, data + *hdr + idx * *len); + switch (*ver * !!outp) { + case 0x21: + case 0x30: + *hdr = nv_ro08(bios, data + 0x04); + *len = nv_ro08(bios, data + 0x05); + *cnt = nv_ro08(bios, outp + 0x04); + break; + case 0x40: + *hdr = nv_ro08(bios, data + 0x04); + *cnt = 0; + *len = 0; + break; + default: + break; + } + return outp; + } + *ver = 0x00; + return 0x0000; +} + +u16 +nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpout *info) +{ + u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len); + if (data && *ver) { + info->type = nv_ro16(bios, data + 0x00); + info->mask = nv_ro16(bios, data + 0x02); + switch (*ver) { + case 0x21: + case 0x30: + info->flags = nv_ro08(bios, data + 0x05); + info->script[0] = nv_ro16(bios, data + 0x06); + info->script[1] = nv_ro16(bios, data + 0x08); + info->lnkcmp = nv_ro16(bios, data + 0x0a); + info->script[2] = nv_ro16(bios, data + 0x0c); + info->script[3] = nv_ro16(bios, data + 0x0e); + info->script[4] = nv_ro16(bios, data + 0x10); + break; + case 0x40: + info->flags = nv_ro08(bios, data + 0x04); + info->script[0] = nv_ro16(bios, data + 0x05); + info->script[1] = nv_ro16(bios, data + 0x07); + info->lnkcmp = nv_ro16(bios, data + 0x09); + info->script[2] = nv_ro16(bios, data + 0x0b); + info->script[3] = nv_ro16(bios, data + 0x0d); + info->script[4] = nv_ro16(bios, data + 0x0f); + break; + default: + data = 0x0000; + break; + } + } + return data; +} + +u16 +nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpout *info) +{ + u16 data, idx = 0; + while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) { + if (data && info->type == type) { + if ((info->mask & mask) == mask) + break; + } + } + return data; +} + +static u16 +nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + if (*ver >= 0x40) { + outp = nvbios_dp_table(bios, ver, hdr, cnt, len); + *hdr = *hdr + (*len * * cnt); + *len = nv_ro08(bios, outp + 0x06); + *cnt = nv_ro08(bios, outp + 0x07); + } + + if (idx < *cnt) + return outp + *hdr + (idx * *len); + + return 0x0000; +} + +u16 +nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpcfg *info) +{ + u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len); + if (data) { + switch (*ver) { + case 0x21: + info->drv = nv_ro08(bios, data + 0x02); + info->pre = nv_ro08(bios, data + 0x03); + info->unk = nv_ro08(bios, data + 0x04); + break; + case 0x30: + case 0x40: + info->drv = nv_ro08(bios, data + 0x01); + info->pre = 
nv_ro08(bios, data + 0x02); + info->unk = nv_ro08(bios, data + 0x03); + break; + default: + data = 0x0000; + break; + } + } + return data; +} + +u16 +nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_dpcfg *info) +{ + u8 idx = 0xff; + u16 data; + + if (*ver >= 0x30) { + const u8 vsoff[] = { 0, 4, 7, 9 }; + idx = (un * 10) + vsoff[vs] + pe; + } else { + while ((data = nvbios_dpcfg_entry(bios, outp, idx, + ver, hdr, cnt, len))) { + if (nv_ro08(bios, data + 0x00) == vs && + nv_ro08(bios, data + 0x01) == pe) + break; + idx++; + } + } + + return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c new file mode 100644 index 0000000..b2a676e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c @@ -0,0 +1,100 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres + */ + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/extdev.h> + +static u16 +extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt) +{ + u8 dcb_ver, dcb_hdr, dcb_cnt, dcb_len; + u16 dcb, extdev = 0; + + dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len); + if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40)) + return 0x0000; + + extdev = nv_ro16(bios, dcb + 18); + if (!extdev) + return 0x0000; + + *ver = nv_ro08(bios, extdev + 0); + *hdr = nv_ro08(bios, extdev + 1); + *cnt = nv_ro08(bios, extdev + 2); + *len = nv_ro08(bios, extdev + 3); + + return extdev + *hdr; +} + +static u16 +nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len) +{ + u8 hdr, cnt; + u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt); + if (extdev && idx < cnt) + return extdev + idx * *len; + return 0x0000; +} + +static void +extdev_parse_entry(struct nouveau_bios *bios, u16 offset, + struct nvbios_extdev_func *entry) +{ + entry->type = nv_ro08(bios, offset + 0); + entry->addr = nv_ro08(bios, offset + 1); + entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1; +} + +int +nvbios_extdev_parse(struct nouveau_bios *bios, int idx, + struct nvbios_extdev_func *func) +{ + u8 ver, len; + u16 entry; + + if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len))) + return -EINVAL; + + extdev_parse_entry(bios, entry, func); + + return 0; +} + +int +nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type, + struct nvbios_extdev_func *func) +{ + u8 ver, len, i; + u16 entry; + + i = 0; + while (!(entry = nvbios_extdev_entry(bios, i++, &ver, &len))) { + extdev_parse_entry(bios, entry, func); + if (func->type == type) + return 0; + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c new file mode 100644 index 0000000..172a4f9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c @@ -0,0 +1,150 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/gpio.h> +#include <subdev/bios/xpio.h> + +u16 +dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u16 data = 0x0000; + u16 dcb = dcb_table(bios, ver, hdr, cnt, len); + if (dcb) { + if (*ver >= 0x30 && *hdr >= 0x0c) + data = nv_ro16(bios, dcb + 0x0a); + else + if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13) + data = nv_ro16(bios, dcb - 0x0f); + + if (data) { + *ver = nv_ro08(bios, data + 0x00); + if (*ver < 0x30) { + *hdr = 3; + *cnt = nv_ro08(bios, data + 0x02); + *len = nv_ro08(bios, data + 0x01); + } else + if (*ver <= 0x41) { + *hdr = nv_ro08(bios, data + 0x01); + *cnt = nv_ro08(bios, data + 0x02); + *len = nv_ro08(bios, data + 0x03); + } else { + data = 0x0000; + } + } + } + return data; +} + +u16 +dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len) +{ + u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */ + u16 gpio; + + if (!idx--) + gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len); + else + gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len); + + if (gpio && ent < cnt) + return gpio + hdr + (ent * *len); + return 0x0000; +} + +u16 +dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len, + struct dcb_gpio_func *gpio) +{ + u16 data = dcb_gpio_entry(bios, idx, ent, ver, len); + if (data) { + if (*ver < 0x40) { + u16 info = nv_ro16(bios, data); + *gpio = (struct dcb_gpio_func) { + .line = (info & 0x001f) >> 0, + .func = (info & 0x07e0) >> 5, + .log[0] = (info & 0x1800) >> 11, + .log[1] = (info & 0x6000) >> 13, + .param = !!(info & 0x8000), + }; + } else + if (*ver < 0x41) { + u32 info = nv_ro32(bios, data); + *gpio = (struct dcb_gpio_func) { + .line = (info & 0x0000001f) >> 0, + .func = (info & 0x0000ff00) >> 8, + .log[0] = (info & 0x18000000) >> 27, + .log[1] = (info & 0x60000000) >> 29, + .param = !!(info & 0x80000000), + }; + } else { + u32 info = nv_ro32(bios, data + 0); + u8 info1 = nv_ro32(bios, data + 4); + *gpio = (struct dcb_gpio_func) { + .line = (info & 0x0000003f) >> 0, + .func = (info & 0x0000ff00) >> 8, + .log[0] = (info1 & 0x30) >> 4, + .log[1] = (info1 & 0xc0) >> 6, + .param = !!(info & 0x80000000), + }; + } + } + + return data; +} + +u16 +dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line, + u8 *ver, u8 *len, struct dcb_gpio_func *gpio) +{ + u8 hdr, cnt, i = 0; + u16 data; + + while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) { + if ((line == 0xff || line == gpio->line) && + (func == 0xff || func == gpio->func)) + return data; + } + + /* DCB 2.2, fixed TVDAC GPIO data */ + if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) { + if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) { + u8 conf = nv_ro08(bios, data - 5); + u8 addr = nv_ro08(bios, data - 4); + if (conf & 0x01) { + *gpio = (struct dcb_gpio_func) { + .func = DCB_GPIO_TVDAC0, + .line = addr >> 4, + .log[0] = !!(conf & 0x02), + .log[1] = !(conf & 0x02), + }; + *ver = 0x00; + return data; + } + } + } + + return 0x0000; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c new file mode 100644 index 0000000..cfb9288 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c @@ -0,0 +1,134 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + + +#include "subdev/bios.h" +#include "subdev/bios/dcb.h" +#include "subdev/bios/i2c.h" + +u16 +dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u16 i2c = 0x0000; + u16 dcb = dcb_table(bios, ver, hdr, cnt, len); + if (dcb) { + if (*ver >= 0x15) + i2c = nv_ro16(bios, dcb + 2); + if (*ver >= 0x30) + i2c = nv_ro16(bios, dcb + 4); + } + + if (i2c && *ver >= 0x30) { + *ver = nv_ro08(bios, i2c + 0); + *hdr = nv_ro08(bios, i2c + 1); + *cnt = nv_ro08(bios, i2c + 2); + *len = nv_ro08(bios, i2c + 3); + } else { + *ver = *ver; /* use DCB version */ + *hdr = 0; + *cnt = 16; + *len = 4; + } + + return i2c; +} + +u16 +dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) +{ + u8 hdr, cnt; + u16 i2c = dcb_i2c_table(bios, ver, &hdr, &cnt, len); + if (i2c && idx < cnt) + return i2c + hdr + (idx * *len); + return 0x0000; +} + +int +dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info) +{ + u8 ver, len; + u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); + if (ent) { + info->type = nv_ro08(bios, ent + 3); + info->share = DCB_I2C_UNUSED; + if (ver < 0x30) { + info->type &= 0x07; + if (info->type == 0x07) + info->type = DCB_I2C_UNUSED; + } + + switch (info->type) { + case DCB_I2C_NV04_BIT: + info->drive = nv_ro08(bios, ent + 0); + info->sense = nv_ro08(bios, ent + 1); + return 0; + case DCB_I2C_NV4E_BIT: + info->drive = nv_ro08(bios, ent + 1); + return 0; + case DCB_I2C_NVIO_BIT: + case DCB_I2C_NVIO_AUX: + info->drive = nv_ro08(bios, ent + 0) & 0x0f; + if (nv_ro08(bios, ent + 1) & 0x01) { + info->share = nv_ro08(bios, ent + 1) >> 1; + info->share &= 0x0f; + } + return 0; + case DCB_I2C_UNUSED: + return 0; + default: + nv_warn(bios, "unknown i2c type %d\n", info->type); + info->type = DCB_I2C_UNUSED; + return 0; + } + } + + if (bios->bmp_offset && idx < 2) { + /* BMP (from v4.0 has i2c info in the structure, it's in a + * fixed location on earlier VBIOS + */ + if (nv_ro08(bios, bios->bmp_offset + 5) < 4) + ent = 0x0048; + else + ent = 0x0036 + bios->bmp_offset; + + if (idx == 0) { + info->drive = nv_ro08(bios, ent + 4); + if (!info->drive) info->drive = 0x3f; + info->sense = nv_ro08(bios, ent + 5); + if (!info->sense) info->sense = 0x3e; + } else + if (idx == 1) { + info->drive = nv_ro08(bios, ent + 6); + if (!info->drive) info->drive = 0x37; + info->sense = nv_ro08(bios, ent + 7); + if (!info->sense) info->sense = 0x36; + } + + 
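+		/* BMP-sourced ports are always plain NV04-style bit-banged i2c */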
info->type = DCB_I2C_NV04_BIT; + info->share = DCB_I2C_UNUSED; + return 0; + } + + return -ENOENT; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c new file mode 100644 index 0000000..e2d7f38 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -0,0 +1,2215 @@ +#include <core/engine.h> +#include <core/device.h> + +#include <subdev/bios.h> +#include <subdev/bios/bmp.h> +#include <subdev/bios/bit.h> +#include <subdev/bios/conn.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/dp.h> +#include <subdev/bios/gpio.h> +#include <subdev/bios/init.h> +#include <subdev/devinit.h> +#include <subdev/clock.h> +#include <subdev/i2c.h> +#include <subdev/vga.h> +#include <subdev/gpio.h> + +#define bioslog(lvl, fmt, args...) do { \ + nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset, \ + init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args); \ +} while(0) +#define cont(fmt, args...) do { \ + if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE) \ + printk(fmt, ##args); \ +} while(0) +#define trace(fmt, args...) bioslog(TRACE, fmt, ##args) +#define warn(fmt, args...) bioslog(WARN, fmt, ##args) +#define error(fmt, args...) bioslog(ERROR, fmt, ##args) + +/****************************************************************************** + * init parser control flow helpers + *****************************************************************************/ + +static inline bool +init_exec(struct nvbios_init *init) +{ + return (init->execute == 1) || ((init->execute & 5) == 5); +} + +static inline void +init_exec_set(struct nvbios_init *init, bool exec) +{ + if (exec) init->execute &= 0xfd; + else init->execute |= 0x02; +} + +static inline void +init_exec_inv(struct nvbios_init *init) +{ + init->execute ^= 0x02; +} + +static inline void +init_exec_force(struct nvbios_init *init, bool exec) +{ + if (exec) init->execute |= 0x04; + else init->execute &= 0xfb; +} + +/****************************************************************************** + * init parser wrappers for normal register/i2c/whatever accessors + *****************************************************************************/ + +static inline int +init_or(struct nvbios_init *init) +{ + if (init_exec(init)) { + if (init->outp) + return ffs(init->outp->or) - 1; + error("script needs OR!!\n"); + } + return 0; +} + +static inline int +init_link(struct nvbios_init *init) +{ + if (init_exec(init)) { + if (init->outp) + return !(init->outp->sorconf.link & 1); + error("script needs OR link\n"); + } + return 0; +} + +static inline int +init_crtc(struct nvbios_init *init) +{ + if (init_exec(init)) { + if (init->crtc >= 0) + return init->crtc; + error("script needs crtc\n"); + } + return 0; +} + +static u8 +init_conn(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 ver, len; + u16 conn; + + if (init_exec(init)) { + if (init->outp) { + conn = init->outp->connector; + conn = dcb_conn(bios, conn, &ver, &len); + if (conn) + return nv_ro08(bios, conn); + } + + error("script needs connector type\n"); + } + + return 0xff; +} + +static inline u32 +init_nvreg(struct nvbios_init *init, u32 reg) +{ + /* C51 (at least) sometimes has the lower bits set which the VBIOS + * interprets to mean that access needs to go through certain IO + * ports instead. 
The NVIDIA binary driver has been seen to access + * these through the NV register address, so lets assume we can + * do the same + */ + reg &= ~0x00000003; + + /* GF8+ display scripts need register addresses mangled a bit to + * select a specific CRTC/OR + */ + if (nv_device(init->bios)->card_type >= NV_50) { + if (reg & 0x80000000) { + reg += init_crtc(init) * 0x800; + reg &= ~0x80000000; + } + + if (reg & 0x40000000) { + reg += init_or(init) * 0x800; + reg &= ~0x40000000; + if (reg & 0x20000000) { + reg += init_link(init) * 0x80; + reg &= ~0x20000000; + } + } + } + + if (reg & ~0x00fffffc) + warn("unknown bits in register 0x%08x\n", reg); + return reg; +} + +static u32 +init_rd32(struct nvbios_init *init, u32 reg) +{ + reg = init_nvreg(init, reg); + if (init_exec(init)) + return nv_rd32(init->subdev, reg); + return 0x00000000; +} + +static void +init_wr32(struct nvbios_init *init, u32 reg, u32 val) +{ + reg = init_nvreg(init, reg); + if (init_exec(init)) + nv_wr32(init->subdev, reg, val); +} + +static u32 +init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val) +{ + reg = init_nvreg(init, reg); + if (init_exec(init)) { + u32 tmp = nv_rd32(init->subdev, reg); + nv_wr32(init->subdev, reg, (tmp & ~mask) | val); + return tmp; + } + return 0x00000000; +} + +static u8 +init_rdport(struct nvbios_init *init, u16 port) +{ + if (init_exec(init)) + return nv_rdport(init->subdev, init->crtc, port); + return 0x00; +} + +static void +init_wrport(struct nvbios_init *init, u16 port, u8 value) +{ + if (init_exec(init)) + nv_wrport(init->subdev, init->crtc, port, value); +} + +static u8 +init_rdvgai(struct nvbios_init *init, u16 port, u8 index) +{ + struct nouveau_subdev *subdev = init->subdev; + if (init_exec(init)) { + int head = init->crtc < 0 ? 0 : init->crtc; + return nv_rdvgai(subdev, head, port, index); + } + return 0x00; +} + +static void +init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value) +{ + /* force head 0 for updates to cr44, it only exists on first head */ + if (nv_device(init->subdev)->card_type < NV_50) { + if (port == 0x03d4 && index == 0x44) + init->crtc = 0; + } + + if (init_exec(init)) { + int head = init->crtc < 0 ? 
0 : init->crtc; + nv_wrvgai(init->subdev, head, port, index, value); + } + + /* select head 1 if cr44 write selected it */ + if (nv_device(init->subdev)->card_type < NV_50) { + if (port == 0x03d4 && index == 0x44 && value == 3) + init->crtc = 1; + } +} + +static struct nouveau_i2c_port * +init_i2c(struct nvbios_init *init, int index) +{ + struct nouveau_i2c *i2c = nouveau_i2c(init->bios); + + if (index == 0xff) { + index = NV_I2C_DEFAULT(0); + if (init->outp && init->outp->i2c_upper_default) + index = NV_I2C_DEFAULT(1); + } else + if (index < 0) { + if (!init->outp) { + if (init_exec(init)) + error("script needs output for i2c\n"); + return NULL; + } + + if (index == -2 && init->outp->location) { + index = NV_I2C_TYPE_EXTAUX(init->outp->extdev); + return i2c->find_type(i2c, index); + } + + index = init->outp->i2c_index; + } + + return i2c->find(i2c, index); +} + +static int +init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg) +{ + struct nouveau_i2c_port *port = init_i2c(init, index); + if (port && init_exec(init)) + return nv_rdi2cr(port, addr, reg); + return -ENODEV; +} + +static int +init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val) +{ + struct nouveau_i2c_port *port = init_i2c(init, index); + if (port && init_exec(init)) + return nv_wri2cr(port, addr, reg, val); + return -ENODEV; +} + +static int +init_rdauxr(struct nvbios_init *init, u32 addr) +{ + struct nouveau_i2c_port *port = init_i2c(init, -2); + u8 data; + + if (port && init_exec(init)) { + int ret = nv_rdaux(port, addr, &data, 1); + if (ret) + return ret; + return data; + } + + return -ENODEV; +} + +static int +init_wrauxr(struct nvbios_init *init, u32 addr, u8 data) +{ + struct nouveau_i2c_port *port = init_i2c(init, -2); + if (port && init_exec(init)) + return nv_wraux(port, addr, &data, 1); + return -ENODEV; +} + +static void +init_prog_pll(struct nvbios_init *init, u32 id, u32 freq) +{ + struct nouveau_clock *clk = nouveau_clock(init->bios); + if (clk && clk->pll_set && init_exec(init)) { + int ret = clk->pll_set(clk, id, freq); + if (ret) + warn("failed to prog pll 0x%08x to %dkHz\n", id, freq); + } +} + +/****************************************************************************** + * parsing of bios structures that are required to execute init tables + *****************************************************************************/ + +static u16 +init_table(struct nouveau_bios *bios, u16 *len) +{ + struct bit_entry bit_I; + + if (!bit_entry(bios, 'I', &bit_I)) { + *len = bit_I.length; + return bit_I.offset; + } + + if (bmp_version(bios) >= 0x0510) { + *len = 14; + return bios->bmp_offset + 75; + } + + return 0x0000; +} + +static u16 +init_table_(struct nvbios_init *init, u16 offset, const char *name) +{ + struct nouveau_bios *bios = init->bios; + u16 len, data = init_table(bios, &len); + if (data) { + if (len >= offset + 2) { + data = nv_ro16(bios, data + offset); + if (data) + return data; + + warn("%s pointer invalid\n", name); + return 0x0000; + } + + warn("init data too short for %s pointer", name); + return 0x0000; + } + + warn("init data not found\n"); + return 0x0000; +} + +#define init_script_table(b) init_table_((b), 0x00, "script table") +#define init_macro_index_table(b) init_table_((b), 0x02, "macro index table") +#define init_macro_table(b) init_table_((b), 0x04, "macro table") +#define init_condition_table(b) init_table_((b), 0x06, "condition table") +#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table") +#define init_io_flag_condition_table(b) 
init_table_((b), 0x0a, "io flag conditon table") +#define init_function_table(b) init_table_((b), 0x0c, "function table") +#define init_xlat_table(b) init_table_((b), 0x10, "xlat table"); + +static u16 +init_script(struct nouveau_bios *bios, int index) +{ + struct nvbios_init init = { .bios = bios }; + u16 data; + + if (bmp_version(bios) && bmp_version(bios) < 0x0510) { + if (index > 1) + return 0x0000; + + data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); + return nv_ro16(bios, data + (index * 2)); + } + + data = init_script_table(&init); + if (data) + return nv_ro16(bios, data + (index * 2)); + + return 0x0000; +} + +static u16 +init_unknown_script(struct nouveau_bios *bios) +{ + u16 len, data = init_table(bios, &len); + if (data && len >= 16) + return nv_ro16(bios, data + 14); + return 0x0000; +} + +static u16 +init_ram_restrict_table(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + struct bit_entry bit_M; + u16 data = 0x0000; + + if (!bit_entry(bios, 'M', &bit_M)) { + if (bit_M.version == 1 && bit_M.length >= 5) + data = nv_ro16(bios, bit_M.offset + 3); + if (bit_M.version == 2 && bit_M.length >= 3) + data = nv_ro16(bios, bit_M.offset + 1); + } + + if (data == 0x0000) + warn("ram restrict table not found\n"); + return data; +} + +static u8 +init_ram_restrict_group_count(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + struct bit_entry bit_M; + + if (!bit_entry(bios, 'M', &bit_M)) { + if (bit_M.version == 1 && bit_M.length >= 5) + return nv_ro08(bios, bit_M.offset + 2); + if (bit_M.version == 2 && bit_M.length >= 3) + return nv_ro08(bios, bit_M.offset + 0); + } + + return 0x00; +} + +static u8 +init_ram_restrict_strap(struct nvbios_init *init) +{ + /* This appears to be the behaviour of the VBIOS parser, and *is* + * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to + * avoid fucking up the memory controller (somehow) by reading it + * on every INIT_RAM_RESTRICT_ZM_GROUP opcode. + * + * Preserving the non-caching behaviour on earlier chipsets just + * in case *not* re-reading the strap causes similar breakage. 
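+	 *
+	 * The strap itself is bits 5:2 of NV_PEXTDEV_BOOT0 (0x101000), read just below.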
+ */ + if (!init->ramcfg || init->bios->version.major < 0x70) + init->ramcfg = init_rd32(init, 0x101000); + return (init->ramcfg & 0x00000003c) >> 2; +} + +static u8 +init_ram_restrict(struct nvbios_init *init) +{ + u8 strap = init_ram_restrict_strap(init); + u16 table = init_ram_restrict_table(init); + if (table) + return nv_ro08(init->bios, table + strap); + return 0x00; +} + +static u8 +init_xlat_(struct nvbios_init *init, u8 index, u8 offset) +{ + struct nouveau_bios *bios = init->bios; + u16 table = init_xlat_table(init); + if (table) { + u16 data = nv_ro16(bios, table + (index * 2)); + if (data) + return nv_ro08(bios, data + offset); + warn("xlat table pointer %d invalid\n", index); + } + return 0x00; +} + +/****************************************************************************** + * utility functions used by various init opcode handlers + *****************************************************************************/ + +static bool +init_condition_met(struct nvbios_init *init, u8 cond) +{ + struct nouveau_bios *bios = init->bios; + u16 table = init_condition_table(init); + if (table) { + u32 reg = nv_ro32(bios, table + (cond * 12) + 0); + u32 msk = nv_ro32(bios, table + (cond * 12) + 4); + u32 val = nv_ro32(bios, table + (cond * 12) + 8); + trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n", + cond, reg, msk, val); + return (init_rd32(init, reg) & msk) == val; + } + return false; +} + +static bool +init_io_condition_met(struct nvbios_init *init, u8 cond) +{ + struct nouveau_bios *bios = init->bios; + u16 table = init_io_condition_table(init); + if (table) { + u16 port = nv_ro16(bios, table + (cond * 5) + 0); + u8 index = nv_ro08(bios, table + (cond * 5) + 2); + u8 mask = nv_ro08(bios, table + (cond * 5) + 3); + u8 value = nv_ro08(bios, table + (cond * 5) + 4); + trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n", + cond, port, index, mask, value); + return (init_rdvgai(init, port, index) & mask) == value; + } + return false; +} + +static bool +init_io_flag_condition_met(struct nvbios_init *init, u8 cond) +{ + struct nouveau_bios *bios = init->bios; + u16 table = init_io_flag_condition_table(init); + if (table) { + u16 port = nv_ro16(bios, table + (cond * 9) + 0); + u8 index = nv_ro08(bios, table + (cond * 9) + 2); + u8 mask = nv_ro08(bios, table + (cond * 9) + 3); + u8 shift = nv_ro08(bios, table + (cond * 9) + 4); + u16 data = nv_ro16(bios, table + (cond * 9) + 5); + u8 dmask = nv_ro08(bios, table + (cond * 9) + 7); + u8 value = nv_ro08(bios, table + (cond * 9) + 8); + u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift; + return (nv_ro08(bios, data + ioval) & dmask) == value; + } + return false; +} + +static inline u32 +init_shift(u32 data, u8 shift) +{ + if (shift < 0x80) + return data >> shift; + return data << (0x100 - shift); +} + +static u32 +init_tmds_reg(struct nvbios_init *init, u8 tmds) +{ + /* For mlv < 0x80, it is an index into a table of TMDS base addresses. + * For mlv == 0x80 use the "or" value of the dcb_entry indexed by + * CR58 for CR57 = 0 to index a table of offsets to the basic + * 0x6808b0 address. + * For mlv == 0x81 use the "or" value of the dcb_entry indexed by + * CR58 for CR57 = 0 to index a table of offsets to the basic + * 0x6808b0 address, and then flip the offset by 8. 
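+	 *
+	 * pramdac_table[] below holds the base addresses used for mlv < 0x80,
+	 * and pramdac_offset[] the per-OR offsets applied to 0x6808b0 otherwise.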
+ */ + + const int pramdac_offset[13] = { + 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; + const u32 pramdac_table[4] = { + 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 }; + + if (tmds >= 0x80) { + if (init->outp) { + u32 dacoffset = pramdac_offset[init->outp->or]; + if (tmds == 0x81) + dacoffset ^= 8; + return 0x6808b0 + dacoffset; + } + + if (init_exec(init)) + error("tmds opcodes need dcb\n"); + } else { + if (tmds < ARRAY_SIZE(pramdac_table)) + return pramdac_table[tmds]; + + error("tmds selector 0x%02x unknown\n", tmds); + } + + return 0; +} + +/****************************************************************************** + * init opcode handlers + *****************************************************************************/ + +/** + * init_reserved - stub for various unknown/unused single-byte opcodes + * + */ +static void +init_reserved(struct nvbios_init *init) +{ + u8 opcode = nv_ro08(init->bios, init->offset); + u8 length, i; + + switch (opcode) { + case 0xaa: + length = 4; + break; + default: + length = 1; + break; + } + + trace("RESERVED 0x%02x\t", opcode); + for (i = 1; i < length; i++) + cont(" 0x%02x", nv_ro08(init->bios, init->offset + i)); + cont("\n"); + init->offset += length; +} + +/** + * INIT_DONE - opcode 0x71 + * + */ +static void +init_done(struct nvbios_init *init) +{ + trace("DONE\n"); + init->offset = 0x0000; +} + +/** + * INIT_IO_RESTRICT_PROG - opcode 0x32 + * + */ +static void +init_io_restrict_prog(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 port = nv_ro16(bios, init->offset + 1); + u8 index = nv_ro08(bios, init->offset + 3); + u8 mask = nv_ro08(bios, init->offset + 4); + u8 shift = nv_ro08(bios, init->offset + 5); + u8 count = nv_ro08(bios, init->offset + 6); + u32 reg = nv_ro32(bios, init->offset + 7); + u8 conf, i; + + trace("IO_RESTRICT_PROG\tR[0x%06x] = " + "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n", + reg, port, index, mask, shift); + init->offset += 11; + + conf = (init_rdvgai(init, port, index) & mask) >> shift; + for (i = 0; i < count; i++) { + u32 data = nv_ro32(bios, init->offset); + + if (i == conf) { + trace("\t0x%08x *\n", data); + init_wr32(init, reg, data); + } else { + trace("\t0x%08x\n", data); + } + + init->offset += 4; + } + trace("}]\n"); +} + +/** + * INIT_REPEAT - opcode 0x33 + * + */ +static void +init_repeat(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 count = nv_ro08(bios, init->offset + 1); + u16 repeat = init->repeat; + + trace("REPEAT\t0x%02x\n", count); + init->offset += 2; + + init->repeat = init->offset; + init->repend = init->offset; + while (count--) { + init->offset = init->repeat; + nvbios_exec(init); + if (count) + trace("REPEAT\t0x%02x\n", count); + } + init->offset = init->repend; + init->repeat = repeat; +} + +/** + * INIT_IO_RESTRICT_PLL - opcode 0x34 + * + */ +static void +init_io_restrict_pll(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 port = nv_ro16(bios, init->offset + 1); + u8 index = nv_ro08(bios, init->offset + 3); + u8 mask = nv_ro08(bios, init->offset + 4); + u8 shift = nv_ro08(bios, init->offset + 5); + s8 iofc = nv_ro08(bios, init->offset + 6); + u8 count = nv_ro08(bios, init->offset + 7); + u32 reg = nv_ro32(bios, init->offset + 8); + u8 conf, i; + + trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= " + "((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) IOFCOND 0x%02x [{\n", + reg, port, index, mask, shift, iofc); + init->offset += 12; + + conf = (init_rdvgai(init, port, index) & mask) >> shift; + for (i = 0; i < count; i++) 
{ + u32 freq = nv_ro16(bios, init->offset) * 10; + + if (i == conf) { + trace("\t%dkHz *\n", freq); + if (iofc > 0 && init_io_flag_condition_met(init, iofc)) + freq *= 2; + init_prog_pll(init, reg, freq); + } else { + trace("\t%dkHz\n", freq); + } + + init->offset += 2; + } + trace("}]\n"); +} + +/** + * INIT_END_REPEAT - opcode 0x36 + * + */ +static void +init_end_repeat(struct nvbios_init *init) +{ + trace("END_REPEAT\n"); + init->offset += 1; + + if (init->repeat) { + init->repend = init->offset; + init->offset = 0; + } +} + +/** + * INIT_COPY - opcode 0x37 + * + */ +static void +init_copy(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u8 shift = nv_ro08(bios, init->offset + 5); + u8 smask = nv_ro08(bios, init->offset + 6); + u16 port = nv_ro16(bios, init->offset + 7); + u8 index = nv_ro08(bios, init->offset + 9); + u8 mask = nv_ro08(bios, init->offset + 10); + u8 data; + + trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= " + "((R[0x%06x] %s 0x%02x) & 0x%02x)\n", + port, index, mask, reg, (shift & 0x80) ? "<<" : ">>", + (shift & 0x80) ? (0x100 - shift) : shift, smask); + init->offset += 11; + + data = init_rdvgai(init, port, index) & mask; + data |= init_shift(init_rd32(init, reg), shift) & smask; + init_wrvgai(init, port, index, data); +} + +/** + * INIT_NOT - opcode 0x38 + * + */ +static void +init_not(struct nvbios_init *init) +{ + trace("NOT\n"); + init->offset += 1; + init_exec_inv(init); +} + +/** + * INIT_IO_FLAG_CONDITION - opcode 0x39 + * + */ +static void +init_io_flag_condition(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 cond = nv_ro08(bios, init->offset + 1); + + trace("IO_FLAG_CONDITION\t0x%02x\n", cond); + init->offset += 2; + + if (!init_io_flag_condition_met(init, cond)) + init_exec_set(init, false); +} + +/** + * INIT_DP_CONDITION - opcode 0x3a + * + */ +static void +init_dp_condition(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + struct nvbios_dpout info; + u8 cond = nv_ro08(bios, init->offset + 1); + u8 unkn = nv_ro08(bios, init->offset + 2); + u8 ver, hdr, cnt, len; + u16 data; + + trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn); + init->offset += 3; + + switch (cond) { + case 0: + if (init_conn(init) != DCB_CONNECTOR_eDP) + init_exec_set(init, false); + break; + case 1: + case 2: + if ( init->outp && + (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP, + (init->outp->or << 0) | + (init->outp->sorconf.link << 6), + &ver, &hdr, &cnt, &len, &info))) + { + if (!(info.flags & cond)) + init_exec_set(init, false); + break; + } + + if (init_exec(init)) + warn("script needs dp output table data\n"); + break; + case 5: + if (!(init_rdauxr(init, 0x0d) & 1)) + init_exec_set(init, false); + break; + default: + warn("unknown dp condition 0x%02x\n", cond); + break; + } +} + +/** + * INIT_IO_MASK_OR - opcode 0x3b + * + */ +static void +init_io_mask_or(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 or = init_or(init); + u8 data; + + trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or); + init->offset += 2; + + data = init_rdvgai(init, 0x03d4, index); + init_wrvgai(init, 0x03d4, index, data &= ~(1 << or)); +} + +/** + * INIT_IO_OR - opcode 0x3c + * + */ +static void +init_io_or(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 or = init_or(init); + u8 data; + + trace("IO_OR\t0x03d4[0x%02x] |= (1 << 
0x%02x)\n", index, or); + init->offset += 2; + + data = init_rdvgai(init, 0x03d4, index); + init_wrvgai(init, 0x03d4, index, data | (1 << or)); +} + +/** + * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49 + * + */ +static void +init_idx_addr_latched(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 creg = nv_ro32(bios, init->offset + 1); + u32 dreg = nv_ro32(bios, init->offset + 5); + u32 mask = nv_ro32(bios, init->offset + 9); + u32 data = nv_ro32(bios, init->offset + 13); + u8 count = nv_ro08(bios, init->offset + 17); + + trace("INDEX_ADDRESS_LATCHED\t" + "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n", + creg, dreg, mask, data); + init->offset += 18; + + while (count--) { + u8 iaddr = nv_ro08(bios, init->offset + 0); + u8 idata = nv_ro08(bios, init->offset + 1); + + trace("\t[0x%02x] = 0x%02x\n", iaddr, idata); + init->offset += 2; + + init_wr32(init, dreg, idata); + init_mask(init, creg, ~mask, data | iaddr); + } +} + +/** + * INIT_IO_RESTRICT_PLL2 - opcode 0x4a + * + */ +static void +init_io_restrict_pll2(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 port = nv_ro16(bios, init->offset + 1); + u8 index = nv_ro08(bios, init->offset + 3); + u8 mask = nv_ro08(bios, init->offset + 4); + u8 shift = nv_ro08(bios, init->offset + 5); + u8 count = nv_ro08(bios, init->offset + 6); + u32 reg = nv_ro32(bios, init->offset + 7); + u8 conf, i; + + trace("IO_RESTRICT_PLL2\t" + "R[0x%06x] =PLL= ((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) [{\n", + reg, port, index, mask, shift); + init->offset += 11; + + conf = (init_rdvgai(init, port, index) & mask) >> shift; + for (i = 0; i < count; i++) { + u32 freq = nv_ro32(bios, init->offset); + if (i == conf) { + trace("\t%dkHz *\n", freq); + init_prog_pll(init, reg, freq); + } else { + trace("\t%dkHz\n", freq); + } + init->offset += 4; + } + trace("}]\n"); +} + +/** + * INIT_PLL2 - opcode 0x4b + * + */ +static void +init_pll2(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u32 freq = nv_ro32(bios, init->offset + 5); + + trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq); + init->offset += 9; + + init_prog_pll(init, reg, freq); +} + +/** + * INIT_I2C_BYTE - opcode 0x4c + * + */ +static void +init_i2c_byte(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 addr = nv_ro08(bios, init->offset + 2) >> 1; + u8 count = nv_ro08(bios, init->offset + 3); + + trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr); + init->offset += 4; + + while (count--) { + u8 reg = nv_ro08(bios, init->offset + 0); + u8 mask = nv_ro08(bios, init->offset + 1); + u8 data = nv_ro08(bios, init->offset + 2); + int val; + + trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data); + init->offset += 3; + + val = init_rdi2cr(init, index, addr, reg); + if (val < 0) + continue; + init_wri2cr(init, index, addr, reg, (val & mask) | data); + } +} + +/** + * INIT_ZM_I2C_BYTE - opcode 0x4d + * + */ +static void +init_zm_i2c_byte(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 addr = nv_ro08(bios, init->offset + 2) >> 1; + u8 count = nv_ro08(bios, init->offset + 3); + + trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr); + init->offset += 4; + + while (count--) { + u8 reg = nv_ro08(bios, init->offset + 0); + u8 data = nv_ro08(bios, init->offset + 1); + + trace("\t[0x%02x] = 0x%02x\n", reg, data); + init->offset += 2; + + init_wri2cr(init, 
index, addr, reg, data); + } + +} + +/** + * INIT_ZM_I2C - opcode 0x4e + * + */ +static void +init_zm_i2c(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 addr = nv_ro08(bios, init->offset + 2) >> 1; + u8 count = nv_ro08(bios, init->offset + 3); + u8 data[256], i; + + trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr); + init->offset += 4; + + for (i = 0; i < count; i++) { + data[i] = nv_ro08(bios, init->offset); + trace("\t0x%02x\n", data[i]); + init->offset++; + } + + if (init_exec(init)) { + struct nouveau_i2c_port *port = init_i2c(init, index); + struct i2c_msg msg = { + .addr = addr, .flags = 0, .len = count, .buf = data, + }; + int ret; + + if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1) + warn("i2c wr failed, %d\n", ret); + } +} + +/** + * INIT_TMDS - opcode 0x4f + * + */ +static void +init_tmds(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 tmds = nv_ro08(bios, init->offset + 1); + u8 addr = nv_ro08(bios, init->offset + 2); + u8 mask = nv_ro08(bios, init->offset + 3); + u8 data = nv_ro08(bios, init->offset + 4); + u32 reg = init_tmds_reg(init, tmds); + + trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n", + tmds, addr, mask, data); + init->offset += 5; + + if (reg == 0) + return; + + init_wr32(init, reg + 0, addr | 0x00010000); + init_wr32(init, reg + 4, data | (init_rd32(init, reg + 4) & mask)); + init_wr32(init, reg + 0, addr); +} + +/** + * INIT_ZM_TMDS_GROUP - opcode 0x50 + * + */ +static void +init_zm_tmds_group(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 tmds = nv_ro08(bios, init->offset + 1); + u8 count = nv_ro08(bios, init->offset + 2); + u32 reg = init_tmds_reg(init, tmds); + + trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds); + init->offset += 3; + + while (count--) { + u8 addr = nv_ro08(bios, init->offset + 0); + u8 data = nv_ro08(bios, init->offset + 1); + + trace("\t[0x%02x] = 0x%02x\n", addr, data); + init->offset += 2; + + init_wr32(init, reg + 4, data); + init_wr32(init, reg + 0, addr); + } +} + +/** + * INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51 + * + */ +static void +init_cr_idx_adr_latch(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 addr0 = nv_ro08(bios, init->offset + 1); + u8 addr1 = nv_ro08(bios, init->offset + 2); + u8 base = nv_ro08(bios, init->offset + 3); + u8 count = nv_ro08(bios, init->offset + 4); + u8 save0; + + trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1); + init->offset += 5; + + save0 = init_rdvgai(init, 0x03d4, addr0); + while (count--) { + u8 data = nv_ro08(bios, init->offset); + + trace("\t\t[0x%02x] = 0x%02x\n", base, data); + init->offset += 1; + + init_wrvgai(init, 0x03d4, addr0, base++); + init_wrvgai(init, 0x03d4, addr1, data); + } + init_wrvgai(init, 0x03d4, addr0, save0); +} + +/** + * INIT_CR - opcode 0x52 + * + */ +static void +init_cr(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 addr = nv_ro08(bios, init->offset + 1); + u8 mask = nv_ro08(bios, init->offset + 2); + u8 data = nv_ro08(bios, init->offset + 3); + u8 val; + + trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data); + init->offset += 4; + + val = init_rdvgai(init, 0x03d4, addr) & mask; + init_wrvgai(init, 0x03d4, addr, val | data); +} + +/** + * INIT_ZM_CR - opcode 0x53 + * + */ +static void +init_zm_cr(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 addr = nv_ro08(bios, init->offset + 1); + u8 data = nv_ro08(bios, 
init->offset + 2); + + trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data); + init->offset += 3; + + init_wrvgai(init, 0x03d4, addr, data); +} + +/** + * INIT_ZM_CR_GROUP - opcode 0x54 + * + */ +static void +init_zm_cr_group(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 count = nv_ro08(bios, init->offset + 1); + + trace("ZM_CR_GROUP\n"); + init->offset += 2; + + while (count--) { + u8 addr = nv_ro08(bios, init->offset + 0); + u8 data = nv_ro08(bios, init->offset + 1); + + trace("\t\tC[0x%02x] = 0x%02x\n", addr, data); + init->offset += 2; + + init_wrvgai(init, 0x03d4, addr, data); + } +} + +/** + * INIT_CONDITION_TIME - opcode 0x56 + * + */ +static void +init_condition_time(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 cond = nv_ro08(bios, init->offset + 1); + u8 retry = nv_ro08(bios, init->offset + 2); + u8 wait = min((u16)retry * 50, 100); + + trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry); + init->offset += 3; + + if (!init_exec(init)) + return; + + while (wait--) { + if (init_condition_met(init, cond)) + return; + mdelay(20); + } + + init_exec_set(init, false); +} + +/** + * INIT_LTIME - opcode 0x57 + * + */ +static void +init_ltime(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 msec = nv_ro16(bios, init->offset + 1); + + trace("LTIME\t0x%04x\n", msec); + init->offset += 3; + + if (init_exec(init)) + mdelay(msec); +} + +/** + * INIT_ZM_REG_SEQUENCE - opcode 0x58 + * + */ +static void +init_zm_reg_sequence(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 base = nv_ro32(bios, init->offset + 1); + u8 count = nv_ro08(bios, init->offset + 5); + + trace("ZM_REG_SEQUENCE\t0x%02x\n", count); + init->offset += 6; + + while (count--) { + u32 data = nv_ro32(bios, init->offset); + + trace("\t\tR[0x%06x] = 0x%08x\n", base, data); + init->offset += 4; + + init_wr32(init, base, data); + base += 4; + } +} + +/** + * INIT_SUB_DIRECT - opcode 0x5b + * + */ +static void +init_sub_direct(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 addr = nv_ro16(bios, init->offset + 1); + u16 save; + + trace("SUB_DIRECT\t0x%04x\n", addr); + + if (init_exec(init)) { + save = init->offset; + init->offset = addr; + if (nvbios_exec(init)) { + error("error parsing sub-table\n"); + return; + } + init->offset = save; + } + + init->offset += 3; +} + +/** + * INIT_JUMP - opcode 0x5c + * + */ +static void +init_jump(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 offset = nv_ro16(bios, init->offset + 1); + + trace("JUMP\t0x%04x\n", offset); + init->offset = offset; +} + +/** + * INIT_I2C_IF - opcode 0x5e + * + */ +static void +init_i2c_if(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 addr = nv_ro08(bios, init->offset + 2); + u8 reg = nv_ro08(bios, init->offset + 3); + u8 mask = nv_ro08(bios, init->offset + 4); + u8 data = nv_ro08(bios, init->offset + 5); + u8 value; + + trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n", + index, addr, reg, mask, data); + init->offset += 6; + init_exec_force(init, true); + + value = init_rdi2cr(init, index, addr, reg); + if ((value & mask) != data) + init_exec_set(init, false); + + init_exec_force(init, false); +} + +/** + * INIT_COPY_NV_REG - opcode 0x5f + * + */ +static void +init_copy_nv_reg(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 sreg = nv_ro32(bios, init->offset + 1); + u8 shift = 
nv_ro08(bios, init->offset + 5); + u32 smask = nv_ro32(bios, init->offset + 6); + u32 sxor = nv_ro32(bios, init->offset + 10); + u32 dreg = nv_ro32(bios, init->offset + 14); + u32 dmask = nv_ro32(bios, init->offset + 18); + u32 data; + + trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= " + "((R[0x%06x] %s 0x%02x) & 0x%08x ^ 0x%08x)\n", + dreg, dmask, sreg, (shift & 0x80) ? "<<" : ">>", + (shift & 0x80) ? (0x100 - shift) : shift, smask, sxor); + init->offset += 22; + + data = init_shift(init_rd32(init, sreg), shift); + init_mask(init, dreg, ~dmask, (data & smask) ^ sxor); +} + +/** + * INIT_ZM_INDEX_IO - opcode 0x62 + * + */ +static void +init_zm_index_io(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 port = nv_ro16(bios, init->offset + 1); + u8 index = nv_ro08(bios, init->offset + 3); + u8 data = nv_ro08(bios, init->offset + 4); + + trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data); + init->offset += 5; + + init_wrvgai(init, port, index, data); +} + +/** + * INIT_COMPUTE_MEM - opcode 0x63 + * + */ +static void +init_compute_mem(struct nvbios_init *init) +{ + struct nouveau_devinit *devinit = nouveau_devinit(init->bios); + + trace("COMPUTE_MEM\n"); + init->offset += 1; + + init_exec_force(init, true); + if (init_exec(init) && devinit->meminit) + devinit->meminit(devinit); + init_exec_force(init, false); +} + +/** + * INIT_RESET - opcode 0x65 + * + */ +static void +init_reset(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u32 data1 = nv_ro32(bios, init->offset + 5); + u32 data2 = nv_ro32(bios, init->offset + 9); + u32 savepci19; + + trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2); + init->offset += 13; + init_exec_force(init, true); + + savepci19 = init_mask(init, 0x00184c, 0x00000f00, 0x00000000); + init_wr32(init, reg, data1); + udelay(10); + init_wr32(init, reg, data2); + init_wr32(init, 0x00184c, savepci19); + init_mask(init, 0x001850, 0x00000001, 0x00000000); + + init_exec_force(init, false); +} + +/** + * INIT_CONFIGURE_MEM - opcode 0x66 + * + */ +static u16 +init_configure_mem_clk(struct nvbios_init *init) +{ + u16 mdata = bmp_mem_init_table(init->bios); + if (mdata) + mdata += (init_rdvgai(init, 0x03d4, 0x3c) >> 4) * 66; + return mdata; +} + +static void +init_configure_mem(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 mdata, sdata; + u32 addr, data; + + trace("CONFIGURE_MEM\n"); + init->offset += 1; + + if (bios->version.major > 2) { + init_done(init); + return; + } + init_exec_force(init, true); + + mdata = init_configure_mem_clk(init); + sdata = bmp_sdr_seq_table(bios); + if (nv_ro08(bios, mdata) & 0x01) + sdata = bmp_ddr_seq_table(bios); + mdata += 6; /* skip to data */ + + data = init_rdvgai(init, 0x03c4, 0x01); + init_wrvgai(init, 0x03c4, 0x01, data | 0x20); + + while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) { + switch (addr) { + case 0x10021c: /* CKE_NORMAL */ + case 0x1002d0: /* CMD_REFRESH */ + case 0x1002d4: /* CMD_PRECHARGE */ + data = 0x00000001; + break; + default: + data = nv_ro32(bios, mdata); + mdata += 4; + if (data == 0xffffffff) + continue; + break; + } + + init_wr32(init, addr, data); + } + + init_exec_force(init, false); +} + +/** + * INIT_CONFIGURE_CLK - opcode 0x67 + * + */ +static void +init_configure_clk(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 mdata, clock; + + trace("CONFIGURE_CLK\n"); + init->offset += 1; + + if (bios->version.major > 2) { + init_done(init); 
+ return; + } + init_exec_force(init, true); + + mdata = init_configure_mem_clk(init); + + /* NVPLL */ + clock = nv_ro16(bios, mdata + 4) * 10; + init_prog_pll(init, 0x680500, clock); + + /* MPLL */ + clock = nv_ro16(bios, mdata + 2) * 10; + if (nv_ro08(bios, mdata) & 0x01) + clock *= 2; + init_prog_pll(init, 0x680504, clock); + + init_exec_force(init, false); +} + +/** + * INIT_CONFIGURE_PREINIT - opcode 0x68 + * + */ +static void +init_configure_preinit(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 strap; + + trace("CONFIGURE_PREINIT\n"); + init->offset += 1; + + if (bios->version.major > 2) { + init_done(init); + return; + } + init_exec_force(init, true); + + strap = init_rd32(init, 0x101000); + strap = ((strap << 2) & 0xf0) | ((strap & 0x40) >> 6); + init_wrvgai(init, 0x03d4, 0x3c, strap); + + init_exec_force(init, false); +} + +/** + * INIT_IO - opcode 0x69 + * + */ +static void +init_io(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 port = nv_ro16(bios, init->offset + 1); + u8 mask = nv_ro16(bios, init->offset + 3); + u8 data = nv_ro16(bios, init->offset + 4); + u8 value; + + trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data); + init->offset += 5; + + /* ummm.. yes.. should really figure out wtf this is and why it's + * needed some day.. it's almost certainly wrong, but, it also + * somehow makes things work... + */ + if (nv_device(init->bios)->card_type >= NV_50 && + port == 0x03c3 && data == 0x01) { + init_mask(init, 0x614100, 0xf0800000, 0x00800000); + init_mask(init, 0x00e18c, 0x00020000, 0x00020000); + init_mask(init, 0x614900, 0xf0800000, 0x00800000); + init_mask(init, 0x000200, 0x40000000, 0x00000000); + mdelay(10); + init_mask(init, 0x00e18c, 0x00020000, 0x00000000); + init_mask(init, 0x000200, 0x40000000, 0x40000000); + init_wr32(init, 0x614100, 0x00800018); + init_wr32(init, 0x614900, 0x00800018); + mdelay(10); + init_wr32(init, 0x614100, 0x10000018); + init_wr32(init, 0x614900, 0x10000018); + } + + value = init_rdport(init, port) & mask; + init_wrport(init, port, data | value); +} + +/** + * INIT_SUB - opcode 0x6b + * + */ +static void +init_sub(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u16 addr, save; + + trace("SUB\t0x%02x\n", index); + + addr = init_script(bios, index); + if (addr && init_exec(init)) { + save = init->offset; + init->offset = addr; + if (nvbios_exec(init)) { + error("error parsing sub-table\n"); + return; + } + init->offset = save; + } + + init->offset += 2; +} + +/** + * INIT_RAM_CONDITION - opcode 0x6d + * + */ +static void +init_ram_condition(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 mask = nv_ro08(bios, init->offset + 1); + u8 value = nv_ro08(bios, init->offset + 2); + + trace("RAM_CONDITION\t" + "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value); + init->offset += 3; + + if ((init_rd32(init, 0x100000) & mask) != value) + init_exec_set(init, false); +} + +/** + * INIT_NV_REG - opcode 0x6e + * + */ +static void +init_nv_reg(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u32 mask = nv_ro32(bios, init->offset + 5); + u32 data = nv_ro32(bios, init->offset + 9); + + trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data); + init->offset += 13; + + init_mask(init, reg, ~mask, data); +} + +/** + * INIT_MACRO - opcode 0x6f + * + */ +static void +init_macro(struct nvbios_init *init) +{ + struct 
nouveau_bios *bios = init->bios; + u8 macro = nv_ro08(bios, init->offset + 1); + u16 table; + + trace("MACRO\t0x%02x\n", macro); + + table = init_macro_table(init); + if (table) { + u32 addr = nv_ro32(bios, table + (macro * 8) + 0); + u32 data = nv_ro32(bios, table + (macro * 8) + 4); + trace("\t\tR[0x%06x] = 0x%08x\n", addr, data); + init_wr32(init, addr, data); + } + + init->offset += 2; +} + +/** + * INIT_RESUME - opcode 0x72 + * + */ +static void +init_resume(struct nvbios_init *init) +{ + trace("RESUME\n"); + init->offset += 1; + init_exec_set(init, true); +} + +/** + * INIT_TIME - opcode 0x74 + * + */ +static void +init_time(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 usec = nv_ro16(bios, init->offset + 1); + + trace("TIME\t0x%04x\n", usec); + init->offset += 3; + + if (init_exec(init)) { + if (usec < 1000) + udelay(usec); + else + mdelay((usec + 900) / 1000); + } +} + +/** + * INIT_CONDITION - opcode 0x75 + * + */ +static void +init_condition(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 cond = nv_ro08(bios, init->offset + 1); + + trace("CONDITION\t0x%02x\n", cond); + init->offset += 2; + + if (!init_condition_met(init, cond)) + init_exec_set(init, false); +} + +/** + * INIT_IO_CONDITION - opcode 0x76 + * + */ +static void +init_io_condition(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 cond = nv_ro08(bios, init->offset + 1); + + trace("IO_CONDITION\t0x%02x\n", cond); + init->offset += 2; + + if (!init_io_condition_met(init, cond)) + init_exec_set(init, false); +} + +/** + * INIT_INDEX_IO - opcode 0x78 + * + */ +static void +init_index_io(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u16 port = nv_ro16(bios, init->offset + 1); + u8 index = nv_ro16(bios, init->offset + 3); + u8 mask = nv_ro08(bios, init->offset + 4); + u8 data = nv_ro08(bios, init->offset + 5); + u8 value; + + trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n", + port, index, mask, data); + init->offset += 6; + + value = init_rdvgai(init, port, index) & mask; + init_wrvgai(init, port, index, data | value); +} + +/** + * INIT_PLL - opcode 0x79 + * + */ +static void +init_pll(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 reg = nv_ro32(bios, init->offset + 1); + u32 freq = nv_ro16(bios, init->offset + 5) * 10; + + trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq); + init->offset += 7; + + init_prog_pll(init, reg, freq); +} + +/** + * INIT_ZM_REG - opcode 0x7a + * + */ +static void +init_zm_reg(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 addr = nv_ro32(bios, init->offset + 1); + u32 data = nv_ro32(bios, init->offset + 5); + + trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data); + init->offset += 9; + + if (addr == 0x000200) + data |= 0x00000001; + + init_wr32(init, addr, data); +} + +/** + * INIT_RAM_RESTRICT_PLL - opcde 0x87 + * + */ +static void +init_ram_restrict_pll(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 type = nv_ro08(bios, init->offset + 1); + u8 count = init_ram_restrict_group_count(init); + u8 strap = init_ram_restrict(init); + u8 cconf; + + trace("RAM_RESTRICT_PLL\t0x%02x\n", type); + init->offset += 2; + + for (cconf = 0; cconf < count; cconf++) { + u32 freq = nv_ro32(bios, init->offset); + + if (cconf == strap) { + trace("%dkHz *\n", freq); + init_prog_pll(init, type, freq); + } else { + trace("%dkHz\n", freq); + } + + init->offset += 4; + } +} + +/** + * INIT_GPIO - opcode 0x8e + * + */ 
+static void +init_gpio(struct nvbios_init *init) +{ + struct nouveau_gpio *gpio = nouveau_gpio(init->bios); + + trace("GPIO\n"); + init->offset += 1; + + if (init_exec(init) && gpio && gpio->reset) + gpio->reset(gpio, DCB_GPIO_UNUSED); +} + +/** + * INIT_RAM_RESTRICT_ZM_GROUP - opcode 0x8f + * + */ +static void +init_ram_restrict_zm_reg_group(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 addr = nv_ro32(bios, init->offset + 1); + u8 incr = nv_ro08(bios, init->offset + 5); + u8 num = nv_ro08(bios, init->offset + 6); + u8 count = init_ram_restrict_group_count(init); + u8 index = init_ram_restrict(init); + u8 i, j; + + trace("RAM_RESTRICT_ZM_REG_GROUP\t" + "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num); + init->offset += 7; + + for (i = 0; i < num; i++) { + trace("\tR[0x%06x] = {\n", addr); + for (j = 0; j < count; j++) { + u32 data = nv_ro32(bios, init->offset); + + if (j == index) { + trace("\t\t0x%08x *\n", data); + init_wr32(init, addr, data); + } else { + trace("\t\t0x%08x\n", data); + } + + init->offset += 4; + } + trace("\t}\n"); + addr += incr; + } +} + +/** + * INIT_COPY_ZM_REG - opcode 0x90 + * + */ +static void +init_copy_zm_reg(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 sreg = nv_ro32(bios, init->offset + 1); + u32 dreg = nv_ro32(bios, init->offset + 5); + + trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg); + init->offset += 9; + + init_wr32(init, dreg, init_rd32(init, sreg)); +} + +/** + * INIT_ZM_REG_GROUP - opcode 0x91 + * + */ +static void +init_zm_reg_group(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 addr = nv_ro32(bios, init->offset + 1); + u8 count = nv_ro08(bios, init->offset + 5); + + trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr); + init->offset += 6; + + while (count--) { + u32 data = nv_ro32(bios, init->offset); + trace("\t0x%08x\n", data); + init_wr32(init, addr, data); + init->offset += 4; + } +} + +/** + * INIT_XLAT - opcode 0x96 + * + */ +static void +init_xlat(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 saddr = nv_ro32(bios, init->offset + 1); + u8 sshift = nv_ro08(bios, init->offset + 5); + u8 smask = nv_ro08(bios, init->offset + 6); + u8 index = nv_ro08(bios, init->offset + 7); + u32 daddr = nv_ro32(bios, init->offset + 8); + u32 dmask = nv_ro32(bios, init->offset + 12); + u8 shift = nv_ro08(bios, init->offset + 16); + u32 data; + + trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= " + "(X%02x((R[0x%06x] %s 0x%02x) & 0x%02x) << 0x%02x)\n", + daddr, dmask, index, saddr, (sshift & 0x80) ? "<<" : ">>", + (sshift & 0x80) ? 
(0x100 - sshift) : sshift, smask, shift); + init->offset += 17; + + data = init_shift(init_rd32(init, saddr), sshift) & smask; + data = init_xlat_(init, index, data) << shift; + init_mask(init, daddr, ~dmask, data); +} + +/** + * INIT_ZM_MASK_ADD - opcode 0x97 + * + */ +static void +init_zm_mask_add(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 addr = nv_ro32(bios, init->offset + 1); + u32 mask = nv_ro32(bios, init->offset + 5); + u32 add = nv_ro32(bios, init->offset + 9); + u32 data; + + trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); + init->offset += 13; + + data = init_rd32(init, addr); + data = (data & mask) | ((data + add) & ~mask); + init_wr32(init, addr, data); +} + +/** + * INIT_AUXCH - opcode 0x98 + * + */ +static void +init_auxch(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 addr = nv_ro32(bios, init->offset + 1); + u8 count = nv_ro08(bios, init->offset + 5); + + trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count); + init->offset += 6; + + while (count--) { + u8 mask = nv_ro08(bios, init->offset + 0); + u8 data = nv_ro08(bios, init->offset + 1); + trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data); + mask = init_rdauxr(init, addr) & mask; + init_wrauxr(init, addr, mask | data); + init->offset += 2; + } +} + +/** + * INIT_AUXCH - opcode 0x99 + * + */ +static void +init_zm_auxch(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u32 addr = nv_ro32(bios, init->offset + 1); + u8 count = nv_ro08(bios, init->offset + 5); + + trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count); + init->offset += 6; + + while (count--) { + u8 data = nv_ro08(bios, init->offset + 0); + trace("\tAUX[0x%08x] = 0x%02x\n", addr, data); + init_wrauxr(init, addr, data); + init->offset += 1; + } +} + +/** + * INIT_I2C_LONG_IF - opcode 0x9a + * + */ +static void +init_i2c_long_if(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + u8 index = nv_ro08(bios, init->offset + 1); + u8 addr = nv_ro08(bios, init->offset + 2) >> 1; + u8 reglo = nv_ro08(bios, init->offset + 3); + u8 reghi = nv_ro08(bios, init->offset + 4); + u8 mask = nv_ro08(bios, init->offset + 5); + u8 data = nv_ro08(bios, init->offset + 6); + struct nouveau_i2c_port *port; + + trace("I2C_LONG_IF\t" + "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n", + index, addr, reglo, reghi, mask, data); + init->offset += 7; + + port = init_i2c(init, index); + if (port) { + u8 i[2] = { reghi, reglo }; + u8 o[1] = {}; + struct i2c_msg msg[] = { + { .addr = addr, .flags = 0, .len = 2, .buf = i }, + { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = o } + }; + int ret; + + ret = i2c_transfer(&port->adapter, msg, 2); + if (ret == 2 && ((o[0] & mask) == data)) + return; + } + + init_exec_set(init, false); +} + +/** + * INIT_GPIO_NE - opcode 0xa9 + * + */ +static void +init_gpio_ne(struct nvbios_init *init) +{ + struct nouveau_bios *bios = init->bios; + struct nouveau_gpio *gpio = nouveau_gpio(bios); + struct dcb_gpio_func func; + u8 count = nv_ro08(bios, init->offset + 1); + u8 idx = 0, ver, len; + u16 data, i; + + trace("GPIO_NE\t"); + init->offset += 2; + + for (i = init->offset; i < init->offset + count; i++) + cont("0x%02x ", nv_ro08(bios, i)); + cont("\n"); + + while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) { + if (func.func != DCB_GPIO_UNUSED) { + for (i = init->offset; i < init->offset + count; i++) { + if (func.func == nv_ro08(bios, i)) + break; + } + + trace("\tFUNC[0x%02x]", func.func); + if (i == 
(init->offset + count)) { + cont(" *"); + if (init_exec(init) && gpio && gpio->reset) + gpio->reset(gpio, func.func); + } + cont("\n"); + } + } + + init->offset += count; +} + +static struct nvbios_init_opcode { + void (*exec)(struct nvbios_init *); +} init_opcode[] = { + [0x32] = { init_io_restrict_prog }, + [0x33] = { init_repeat }, + [0x34] = { init_io_restrict_pll }, + [0x36] = { init_end_repeat }, + [0x37] = { init_copy }, + [0x38] = { init_not }, + [0x39] = { init_io_flag_condition }, + [0x3a] = { init_dp_condition }, + [0x3b] = { init_io_mask_or }, + [0x3c] = { init_io_or }, + [0x49] = { init_idx_addr_latched }, + [0x4a] = { init_io_restrict_pll2 }, + [0x4b] = { init_pll2 }, + [0x4c] = { init_i2c_byte }, + [0x4d] = { init_zm_i2c_byte }, + [0x4e] = { init_zm_i2c }, + [0x4f] = { init_tmds }, + [0x50] = { init_zm_tmds_group }, + [0x51] = { init_cr_idx_adr_latch }, + [0x52] = { init_cr }, + [0x53] = { init_zm_cr }, + [0x54] = { init_zm_cr_group }, + [0x56] = { init_condition_time }, + [0x57] = { init_ltime }, + [0x58] = { init_zm_reg_sequence }, + [0x5b] = { init_sub_direct }, + [0x5c] = { init_jump }, + [0x5e] = { init_i2c_if }, + [0x5f] = { init_copy_nv_reg }, + [0x62] = { init_zm_index_io }, + [0x63] = { init_compute_mem }, + [0x65] = { init_reset }, + [0x66] = { init_configure_mem }, + [0x67] = { init_configure_clk }, + [0x68] = { init_configure_preinit }, + [0x69] = { init_io }, + [0x6b] = { init_sub }, + [0x6d] = { init_ram_condition }, + [0x6e] = { init_nv_reg }, + [0x6f] = { init_macro }, + [0x71] = { init_done }, + [0x72] = { init_resume }, + [0x74] = { init_time }, + [0x75] = { init_condition }, + [0x76] = { init_io_condition }, + [0x78] = { init_index_io }, + [0x79] = { init_pll }, + [0x7a] = { init_zm_reg }, + [0x87] = { init_ram_restrict_pll }, + [0x8c] = { init_reserved }, + [0x8d] = { init_reserved }, + [0x8e] = { init_gpio }, + [0x8f] = { init_ram_restrict_zm_reg_group }, + [0x90] = { init_copy_zm_reg }, + [0x91] = { init_zm_reg_group }, + [0x92] = { init_reserved }, + [0x96] = { init_xlat }, + [0x97] = { init_zm_mask_add }, + [0x98] = { init_auxch }, + [0x99] = { init_zm_auxch }, + [0x9a] = { init_i2c_long_if }, + [0xa9] = { init_gpio_ne }, + [0xaa] = { init_reserved }, +}; + +#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0])) + +int +nvbios_exec(struct nvbios_init *init) +{ + init->nested++; + while (init->offset) { + u8 opcode = nv_ro08(init->bios, init->offset); + if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) { + error("unknown opcode 0x%02x\n", opcode); + return -EINVAL; + } + + init_opcode[opcode].exec(init); + } + init->nested--; + return 0; +} + +int +nvbios_init(struct nouveau_subdev *subdev, bool execute) +{ + struct nouveau_bios *bios = nouveau_bios(subdev); + int ret = 0; + int i = -1; + u16 data; + + if (execute) + nv_info(bios, "running init tables\n"); + while (!ret && (data = (init_script(bios, ++i)))) { + struct nvbios_init init = { + .subdev = subdev, + .bios = bios, + .offset = data, + .outp = NULL, + .crtc = -1, + .execute = execute ? 1 : 0, + }; + + ret = nvbios_exec(&init); + } + + /* the vbios parser will run this right after the normal init + * tables, whereas the binary driver appears to run it later. + */ + if (!ret && (data = init_unknown_script(bios))) { + struct nvbios_init init = { + .subdev = subdev, + .bios = bios, + .offset = data, + .outp = NULL, + .crtc = -1, + .execute = execute ? 
1 : 0, + }; + + ret = nvbios_exec(&init); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c new file mode 100644 index 0000000..2610b11 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c @@ -0,0 +1,135 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bios/bit.h> +#include <subdev/bios/mxm.h> + +u16 +mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr) +{ + struct bit_entry x; + + if (bit_entry(bios, 'x', &x)) { + nv_debug(bios, "BIT 'x' table not present\n"); + return 0x0000; + } + + *ver = x.version; + *hdr = x.length; + if (*ver != 1 || *hdr < 3) { + nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr); + return 0x0000; + } + + return x.offset; +} + +/* These map MXM v2.x digital connection values to the appropriate SOR/link, + * hopefully they're correct for all boards within the same chipset... + * + * MXM v3.x VBIOS are nicer and provide pointers to these tables. 
+ */ +static u8 nv84_sor_map[16] = { + 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static u8 nv92_sor_map[16] = { + 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31, + 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static u8 nv94_sor_map[16] = { + 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31, + 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static u8 nv98_sor_map[16] = { + 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31, + 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +u8 +mxm_sor_map(struct nouveau_bios *bios, u8 conn) +{ + u8 ver, hdr; + u16 mxm = mxm_table(bios, &ver, &hdr); + if (mxm && hdr >= 6) { + u16 map = nv_ro16(bios, mxm + 4); + if (map) { + ver = nv_ro08(bios, map); + if (ver == 0x10) { + if (conn < nv_ro08(bios, map + 3)) { + map += nv_ro08(bios, map + 1); + map += conn; + return nv_ro08(bios, map); + } + + return 0x00; + } + + nv_warn(bios, "unknown sor map v%02x\n", ver); + } + } + + if (bios->version.chip == 0x84 || bios->version.chip == 0x86) + return nv84_sor_map[conn]; + if (bios->version.chip == 0x92) + return nv92_sor_map[conn]; + if (bios->version.chip == 0x94 || bios->version.chip == 0x96) + return nv94_sor_map[conn]; + if (bios->version.chip == 0x98) + return nv98_sor_map[conn]; + + nv_warn(bios, "missing sor map\n"); + return 0x00; +} + +u8 +mxm_ddc_map(struct nouveau_bios *bios, u8 port) +{ + u8 ver, hdr; + u16 mxm = mxm_table(bios, &ver, &hdr); + if (mxm && hdr >= 8) { + u16 map = nv_ro16(bios, mxm + 6); + if (map) { + ver = nv_ro08(bios, map); + if (ver == 0x10) { + if (port < nv_ro08(bios, map + 3)) { + map += nv_ro08(bios, map + 1); + map += port; + return nv_ro08(bios, map); + } + + return 0x00; + } + + nv_warn(bios, "unknown ddc map v%02x\n", ver); + } + } + + /* v2.x: directly write port as dcb i2cidx */ + return (port << 4) | port; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c new file mode 100644 index 0000000..bcbb056 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c @@ -0,0 +1,75 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres + */ + +#include <subdev/bios.h> +#include <subdev/bios/bit.h> +#include <subdev/bios/perf.h> + +static u16 +perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + struct bit_entry bit_P; + u16 perf = 0x0000; + + if (!bit_entry(bios, 'P', &bit_P)) { + if (bit_P.version <= 2) { + perf = nv_ro16(bios, bit_P.offset + 0); + if (perf) { + *ver = nv_ro08(bios, perf + 0); + *hdr = nv_ro08(bios, perf + 1); + } + } else + nv_error(bios, "unknown offset for perf in BIT P %d\n", + bit_P.version); + } + + if (bios->bmp_offset) { + if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) { + perf = nv_ro16(bios, bios->bmp_offset + 0x94); + if (perf) { + *hdr = nv_ro08(bios, perf + 0); + *ver = nv_ro08(bios, perf + 1); + } + } + } + + return perf; +} + +int +nvbios_perf_fan_parse(struct nouveau_bios *bios, + struct nvbios_perf_fan *fan) +{ + u8 ver = 0, hdr = 0, cnt = 0, len = 0; + u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len); + if (!perf) + return -ENODEV; + + if (ver >= 0x20 && ver < 0x40 && hdr > 6) + fan->pwm_divisor = nv_ro16(bios, perf + 6); + else + fan->pwm_divisor = 0; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c new file mode 100644 index 0000000..f835501 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c @@ -0,0 +1,415 @@ +/* + * Copyright 2005-2006 Erik Waling + * Copyright 2006 Stephane Marchesin + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <subdev/vga.h> +#include <subdev/bios.h> +#include <subdev/bios/bit.h> +#include <subdev/bios/bmp.h> +#include <subdev/bios/pll.h> + +struct pll_mapping { + u8 type; + u32 reg; +}; + +static struct pll_mapping +nv04_pll_mapping[] = { + { PLL_CORE , 0x680500 }, + { PLL_MEMORY, 0x680504 }, + { PLL_VPLL0 , 0x680508 }, + { PLL_VPLL1 , 0x680520 }, + {} +}; + +static struct pll_mapping +nv40_pll_mapping[] = { + { PLL_CORE , 0x004000 }, + { PLL_MEMORY, 0x004020 }, + { PLL_VPLL0 , 0x680508 }, + { PLL_VPLL1 , 0x680520 }, + {} +}; + +static struct pll_mapping +nv50_pll_mapping[] = { + { PLL_CORE , 0x004028 }, + { PLL_SHADER, 0x004020 }, + { PLL_UNK03 , 0x004000 }, + { PLL_MEMORY, 0x004008 }, + { PLL_UNK40 , 0x00e810 }, + { PLL_UNK41 , 0x00e818 }, + { PLL_UNK42 , 0x00e824 }, + { PLL_VPLL0 , 0x614100 }, + { PLL_VPLL1 , 0x614900 }, + {} +}; + +static struct pll_mapping +nv84_pll_mapping[] = { + { PLL_CORE , 0x004028 }, + { PLL_SHADER, 0x004020 }, + { PLL_MEMORY, 0x004008 }, + { PLL_VDEC , 0x004030 }, + { PLL_UNK41 , 0x00e818 }, + { PLL_VPLL0 , 0x614100 }, + { PLL_VPLL1 , 0x614900 }, + {} +}; + +static u16 +pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + struct bit_entry bit_C; + + if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) { + u16 data = nv_ro16(bios, bit_C.offset + 8); + if (data) { + *ver = nv_ro08(bios, data + 0); + *hdr = nv_ro08(bios, data + 1); + *len = nv_ro08(bios, data + 2); + *cnt = nv_ro08(bios, data + 3); + return data; + } + } + + if (bmp_version(bios) >= 0x0524) { + u16 data = nv_ro16(bios, bios->bmp_offset + 142); + if (data) { + *ver = nv_ro08(bios, data + 0); + *hdr = 1; + *cnt = 1; + *len = 0x18; + return data; + } + } + + *ver = 0x00; + return 0x0000; +} + +static struct pll_mapping * +pll_map(struct nouveau_bios *bios) +{ + switch (nv_device(bios)->card_type) { + case NV_04: + case NV_10: + case NV_20: + case NV_30: + return nv04_pll_mapping; + break; + case NV_40: + return nv40_pll_mapping; + case NV_50: + if (nv_device(bios)->chipset == 0x50) + return nv50_pll_mapping; + else + if (nv_device(bios)->chipset < 0xa3 || + nv_device(bios)->chipset == 0xaa || + nv_device(bios)->chipset == 0xac) + return nv84_pll_mapping; + default: + return NULL; + } +} + +static u16 +pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) +{ + struct pll_mapping *map; + u8 hdr, cnt; + u16 data; + + data = pll_limits_table(bios, ver, &hdr, &cnt, len); + if (data && *ver >= 0x30) { + data += hdr; + while (cnt--) { + if (nv_ro32(bios, data + 3) == reg) { + *type = nv_ro08(bios, data + 0); + return data; + } + data += *len; + } + return 0x0000; + } + + map = pll_map(bios); + while (map->reg) { + if (map->reg == reg && *ver >= 0x20) { + u16 addr = (data += hdr); + *type = map->type; + while (cnt--) { + if (nv_ro32(bios, data) == map->reg) + return data; + data += *len; + } + return addr; + } else + if (map->reg == reg) { + *type = map->type; + return data + 1; + } + map++; + } + + return 0x0000; +} + +static u16 +pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) +{ + struct pll_mapping *map; + u8 hdr, cnt; + u16 data; + + data = pll_limits_table(bios, ver, &hdr, &cnt, len); + if (data && *ver >= 0x30) { + data += hdr; + while (cnt--) { + if (nv_ro08(bios, data + 0) == type) { + *reg = nv_ro32(bios, data + 3); + return data; + } + data += *len; + } + return 0x0000; + } + + map = pll_map(bios); + while (map->reg) { + if (map->type == type && *ver >= 0x20) { + u16 addr = (data += hdr); + *reg = 
map->reg; + while (cnt--) { + if (nv_ro32(bios, data) == map->reg) + return data; + data += *len; + } + return addr; + } else + if (map->type == type) { + *reg = map->reg; + return data + 1; + } + map++; + } + + return 0x0000; +} + +int +nvbios_pll_parse(struct nouveau_bios *bios, u32 type, struct nvbios_pll *info) +{ + u8 ver, len; + u32 reg = type; + u16 data; + + if (type > PLL_MAX) { + reg = type; + data = pll_map_reg(bios, reg, &type, &ver, &len); + } else { + data = pll_map_type(bios, type, &reg, &ver, &len); + } + + if (ver && !data) + return -ENOENT; + + memset(info, 0, sizeof(*info)); + info->type = type; + info->reg = reg; + + switch (ver) { + case 0x00: + break; + case 0x10: + case 0x11: + info->vco1.min_freq = nv_ro32(bios, data + 0); + info->vco1.max_freq = nv_ro32(bios, data + 4); + info->vco2.min_freq = nv_ro32(bios, data + 8); + info->vco2.max_freq = nv_ro32(bios, data + 12); + info->vco1.min_inputfreq = nv_ro32(bios, data + 16); + info->vco2.min_inputfreq = nv_ro32(bios, data + 20); + info->vco1.max_inputfreq = INT_MAX; + info->vco2.max_inputfreq = INT_MAX; + + info->max_p = 0x7; + info->max_p_usable = 0x6; + + /* these values taken from nv30/31/36 */ + switch (bios->version.chip) { + case 0x36: + info->vco1.min_n = 0x5; + break; + default: + info->vco1.min_n = 0x1; + break; + } + info->vco1.max_n = 0xff; + info->vco1.min_m = 0x1; + info->vco1.max_m = 0xd; + + /* + * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this + * table version (apart from nv35)), N2 is compared to + * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and + * save a comparison + */ + info->vco2.min_n = 0x4; + switch (bios->version.chip) { + case 0x30: + case 0x35: + info->vco2.max_n = 0x1f; + break; + default: + info->vco2.max_n = 0x28; + break; + } + info->vco2.min_m = 0x1; + info->vco2.max_m = 0x4; + break; + case 0x20: + case 0x21: + info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000; + info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000; + info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000; + info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000; + info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000; + info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000; + info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000; + info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000; + info->vco1.min_n = nv_ro08(bios, data + 20); + info->vco1.max_n = nv_ro08(bios, data + 21); + info->vco1.min_m = nv_ro08(bios, data + 22); + info->vco1.max_m = nv_ro08(bios, data + 23); + info->vco2.min_n = nv_ro08(bios, data + 24); + info->vco2.max_n = nv_ro08(bios, data + 25); + info->vco2.min_m = nv_ro08(bios, data + 26); + info->vco2.max_m = nv_ro08(bios, data + 27); + + info->max_p = nv_ro08(bios, data + 29); + info->max_p_usable = info->max_p; + if (bios->version.chip < 0x60) + info->max_p_usable = 0x6; + info->bias_p = nv_ro08(bios, data + 30); + + if (len > 0x22) + info->refclk = nv_ro32(bios, data + 31); + break; + case 0x30: + data = nv_ro16(bios, data + 1); + + info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000; + info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000; + info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000; + info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000; + info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000; + info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000; + info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000; + info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000; + info->vco1.min_n = nv_ro08(bios, data + 16); +
info->vco1.max_n = nv_ro08(bios, data + 17); + info->vco1.min_m = nv_ro08(bios, data + 18); + info->vco1.max_m = nv_ro08(bios, data + 19); + info->vco2.min_n = nv_ro08(bios, data + 20); + info->vco2.max_n = nv_ro08(bios, data + 21); + info->vco2.min_m = nv_ro08(bios, data + 22); + info->vco2.max_m = nv_ro08(bios, data + 23); + info->max_p_usable = info->max_p = nv_ro08(bios, data + 25); + info->bias_p = nv_ro08(bios, data + 27); + info->refclk = nv_ro32(bios, data + 28); + break; + case 0x40: + info->refclk = nv_ro16(bios, data + 9) * 1000; + data = nv_ro16(bios, data + 1); + + info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000; + info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000; + info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000; + info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000; + info->vco1.min_m = nv_ro08(bios, data + 8); + info->vco1.max_m = nv_ro08(bios, data + 9); + info->vco1.min_n = nv_ro08(bios, data + 10); + info->vco1.max_n = nv_ro08(bios, data + 11); + info->min_p = nv_ro08(bios, data + 12); + info->max_p = nv_ro08(bios, data + 13); + break; + default: + nv_error(bios, "unknown pll limits version 0x%02x\n", ver); + return -EINVAL; + } + + if (!info->refclk) { + info->refclk = nv_device(bios)->crystal; + if (bios->version.chip == 0x51) { + u32 sel_clk = nv_rd32(bios, 0x680524); + if ((info->reg == 0x680508 && sel_clk & 0x20) || + (info->reg == 0x680520 && sel_clk & 0x80)) { + if (nv_rdvgac(bios, 0, 0x27) < 0xa3) + info->refclk = 200000; + else + info->refclk = 25000; + } + } + } + + /* + * By now any valid limit table ought to have set a max frequency for + * vco1, so if it's zero it's either a pre limit table bios, or one + * with an empty limit table (seen on nv18) + */ + if (!info->vco1.max_freq) { + info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67); + info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71); + if (bmp_version(bios) < 0x0506) { + info->vco1.max_freq = 256000; + info->vco1.min_freq = 128000; + } + + info->vco1.min_inputfreq = 0; + info->vco1.max_inputfreq = INT_MAX; + info->vco1.min_n = 0x1; + info->vco1.max_n = 0xff; + info->vco1.min_m = 0x1; + + if (nv_device(bios)->crystal == 13500) { + /* nv05 does this, nv11 doesn't, nv10 unknown */ + if (bios->version.chip < 0x11) + info->vco1.min_m = 0x7; + info->vco1.max_m = 0xd; + } else { + if (bios->version.chip < 0x11) + info->vco1.min_m = 0x8; + info->vco1.max_m = 0xe; + } + + if (bios->version.chip < 0x17 || + bios->version.chip == 0x1a || + bios->version.chip == 0x20) + info->max_p = 4; + else + info->max_p = 5; + info->max_p_usable = info->max_p; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c new file mode 100644 index 0000000..22a2057 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c @@ -0,0 +1,203 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Martin Peres + */ + +#include <subdev/bios.h> +#include <subdev/bios/bit.h> +#include <subdev/bios/therm.h> + +static u16 +therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt) +{ + struct bit_entry bit_P; + u16 therm = 0; + + if (!bit_entry(bios, 'P', &bit_P)) { + if (bit_P.version == 1) + therm = nv_ro16(bios, bit_P.offset + 12); + else if (bit_P.version == 2) + therm = nv_ro16(bios, bit_P.offset + 16); + else + nv_error(bios, + "unknown offset for thermal in BIT P %d\n", + bit_P.version); + } + + /* exit now if we haven't found the thermal table */ + if (!therm) + return 0x0000; + + *ver = nv_ro08(bios, therm + 0); + *hdr = nv_ro08(bios, therm + 1); + *len = nv_ro08(bios, therm + 2); + *cnt = nv_ro08(bios, therm + 3); + + return therm + nv_ro08(bios, therm + 1); +} + +static u16 +nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len) +{ + u8 hdr, cnt; + u16 therm = therm_table(bios, ver, &hdr, len, &cnt); + if (therm && idx < cnt) + return therm + idx * *len; + return 0x0000; +} + +int +nvbios_therm_sensor_parse(struct nouveau_bios *bios, + enum nvbios_therm_domain domain, + struct nvbios_therm_sensor *sensor) +{ + s8 thrs_section, sensor_section, offset; + u8 ver, len, i; + u16 entry; + + /* we only support the core domain for now */ + if (domain != NVBIOS_THERM_DOMAIN_CORE) + return -EINVAL; + + /* Read the entries from the table */ + thrs_section = 0; + sensor_section = -1; + i = 0; + while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) { + s16 value = nv_ro16(bios, entry + 1); + + switch (nv_ro08(bios, entry + 0)) { + case 0x0: + thrs_section = value; + if (value > 0) + return 0; /* we do not try to support ambient */ + break; + case 0x01: + sensor_section++; + if (sensor_section == 0) { + offset = ((s8) nv_ro08(bios, entry + 2)) / 2; + sensor->offset_constant = offset; + } + break; + + case 0x04: + if (thrs_section == 0) { + sensor->thrs_critical.temp = (value & 0xff0) >> 4; + sensor->thrs_critical.hysteresis = value & 0xf; + } + break; + + case 0x07: + if (thrs_section == 0) { + sensor->thrs_down_clock.temp = (value & 0xff0) >> 4; + sensor->thrs_down_clock.hysteresis = value & 0xf; + } + break; + + case 0x08: + if (thrs_section == 0) { + sensor->thrs_fan_boost.temp = (value & 0xff0) >> 4; + sensor->thrs_fan_boost.hysteresis = value & 0xf; + } + break; + + case 0x10: + if (sensor_section == 0) + sensor->offset_num = value; + break; + + case 0x11: + if (sensor_section == 0) + sensor->offset_den = value; + break; + + case 0x12: + if (sensor_section == 0) + sensor->slope_mult = value; + break; + + case 0x13: + if (sensor_section == 0) + sensor->slope_div = value; + break; + case 0x32: + if (thrs_section == 0) { + sensor->thrs_shutdown.temp = (value & 0xff0) >> 4; + sensor->thrs_shutdown.hysteresis = value & 0xf; + } + break; + } + } + + return 0; +} + +int +nvbios_therm_fan_parse(struct nouveau_bios *bios, + struct nvbios_therm_fan *fan) +{ + struct nouveau_therm_trip_point *cur_trip = NULL; + u8 ver, len, i; + u16 entry; + + uint8_t 
duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0, + 75, 0, 85, 0, 100, 0, 100, 0 }; + + i = 0; + fan->nr_fan_trip = 0; + while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) { + s16 value = nv_ro16(bios, entry + 1); + + switch (nv_ro08(bios, entry + 0)) { + case 0x22: + fan->min_duty = value & 0xff; + fan->max_duty = (value & 0xff00) >> 8; + break; + case 0x24: + fan->nr_fan_trip++; + cur_trip = &fan->trip[fan->nr_fan_trip - 1]; + cur_trip->hysteresis = value & 0xf; + cur_trip->temp = (value & 0xff0) >> 4; + cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12]; + break; + case 0x25: + cur_trip = &fan->trip[fan->nr_fan_trip - 1]; + cur_trip->fan_duty = value; + break; + case 0x26: + fan->pwm_freq = value; + break; + case 0x3b: + fan->bump_period = value; + break; + case 0x3c: + fan->slow_down_period = value; + break; + case 0x46: + fan->linear_min_temp = nv_ro08(bios, entry + 1); + fan->linear_max_temp = nv_ro08(bios, entry + 2); + break; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c new file mode 100644 index 0000000..e9b8e5d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c @@ -0,0 +1,76 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bios/gpio.h> +#include <subdev/bios/xpio.h> + +static u16 +dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len); + if (data && *ver >= 0x40 && *hdr >= 0x06) { + u16 xpio = nv_ro16(bios, data + 0x04); + if (xpio) { + *ver = nv_ro08(bios, data + 0x00); + *hdr = nv_ro08(bios, data + 0x01); + *cnt = nv_ro08(bios, data + 0x02); + *len = nv_ro08(bios, data + 0x03); + return xpio; + } + } + return 0x0000; +} + +u16 +dcb_xpio_table(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len); + if (data && idx < *cnt) { + u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len)); + if (xpio) { + *ver = nv_ro08(bios, data + 0x00); + *hdr = nv_ro08(bios, data + 0x01); + *cnt = nv_ro08(bios, data + 0x02); + *len = nv_ro08(bios, data + 0x03); + return xpio; + } + } + return 0x0000; +} + +u16 +dcb_xpio_parse(struct nouveau_bios *bios, u8 idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, + struct nvbios_xpio *info) +{ + u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len); + if (data && *len >= 6) { + info->type = nv_ro08(bios, data + 0x04); + info->addr = nv_ro08(bios, data + 0x05); + info->flags = nv_ro08(bios, data + 0x06); + } + return 0x0000; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c new file mode 100644 index 0000000..8c7f805 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c @@ -0,0 +1,95 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres <martin.peres@labri.fr> + * Ben Skeggs + */ + +#include <subdev/bus.h> + +struct nv04_bus_priv { + struct nouveau_bus base; +}; + +static void +nv04_bus_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_bus *pbus = nouveau_bus(subdev); + u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); + + if (stat & 0x00000001) { + nv_error(pbus, "BUS ERROR\n"); + stat &= ~0x00000001; + nv_wr32(pbus, 0x001100, 0x00000001); + } + + if (stat & 0x00000110) { + subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO); + if (subdev && subdev->intr) + subdev->intr(subdev); + stat &= ~0x00000110; + nv_wr32(pbus, 0x001100, 0x00000110); + } + + if (stat) { + nv_error(pbus, "unknown intr 0x%08x\n", stat); + nv_mask(pbus, 0x001140, stat, 0x00000000); + } +} + +static int +nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_bus_priv *priv; + int ret; + + ret = nouveau_bus_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->intr = nv04_bus_intr; + return 0; +} + +static int +nv04_bus_init(struct nouveau_object *object) +{ + struct nv04_bus_priv *priv = (void *)object; + + nv_wr32(priv, 0x001100, 0xffffffff); + nv_wr32(priv, 0x001140, 0x00000111); + + return nouveau_bus_init(&priv->base); +} + +struct nouveau_oclass +nv04_bus_oclass = { + .handle = NV_SUBDEV(BUS, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_bus_ctor, + .dtor = _nouveau_bus_dtor, + .init = nv04_bus_init, + .fini = _nouveau_bus_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c new file mode 100644 index 0000000..34132ae --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c @@ -0,0 +1,112 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres <martin.peres@labri.fr> + * Ben Skeggs + */ + +#include <subdev/bus.h> + +struct nv31_bus_priv { + struct nouveau_bus base; +}; + +static void +nv31_bus_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_bus *pbus = nouveau_bus(subdev); + u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); + u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144); + + if (gpio) { + subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO); + if (subdev && subdev->intr) + subdev->intr(subdev); + } + + if (stat & 0x00000008) { /* NV41- */ + u32 addr = nv_rd32(pbus, 0x009084); + u32 data = nv_rd32(pbus, 0x009088); + + nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n", + (addr & 0x00000002) ? "write" : "read", data, + (addr & 0x00fffffc)); + + stat &= ~0x00000008; + nv_wr32(pbus, 0x001100, 0x00000008); + } + + if (stat & 0x00070000) { + subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM); + if (subdev && subdev->intr) + subdev->intr(subdev); + stat &= ~0x00070000; + nv_wr32(pbus, 0x001100, 0x00070000); + } + + if (stat) { + nv_error(pbus, "unknown intr 0x%08x\n", stat); + nv_mask(pbus, 0x001140, stat, 0x00000000); + } +} + +static int +nv31_bus_init(struct nouveau_object *object) +{ + struct nv31_bus_priv *priv = (void *)object; + int ret; + + ret = nouveau_bus_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x001100, 0xffffffff); + nv_wr32(priv, 0x001140, 0x00070008); + return 0; +} + +static int +nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv31_bus_priv *priv; + int ret; + + ret = nouveau_bus_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->intr = nv31_bus_intr; + return 0; +} + +struct nouveau_oclass +nv31_bus_oclass = { + .handle = NV_SUBDEV(BUS, 0x31), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv31_bus_ctor, + .dtor = _nouveau_bus_dtor, + .init = nv31_bus_init, + .fini = _nouveau_bus_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c new file mode 100644 index 0000000..f5b2117 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c @@ -0,0 +1,105 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres <martin.peres@labri.fr> + * Ben Skeggs + */ + +#include <subdev/bus.h> + +struct nv50_bus_priv { + struct nouveau_bus base; +}; + +static void +nv50_bus_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_bus *pbus = nouveau_bus(subdev); + u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); + + if (stat & 0x00000008) { + u32 addr = nv_rd32(pbus, 0x009084); + u32 data = nv_rd32(pbus, 0x009088); + + nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n", + (addr & 0x00000002) ? "write" : "read", data, + (addr & 0x00fffffc)); + + stat &= ~0x00000008; + nv_wr32(pbus, 0x001100, 0x00000008); + } + + if (stat & 0x00010000) { + subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM); + if (subdev && subdev->intr) + subdev->intr(subdev); + stat &= ~0x00010000; + nv_wr32(pbus, 0x001100, 0x00010000); + } + + if (stat) { + nv_error(pbus, "unknown intr 0x%08x\n", stat); + nv_mask(pbus, 0x001140, stat, 0); + } +} + +static int +nv50_bus_init(struct nouveau_object *object) +{ + struct nv50_bus_priv *priv = (void *)object; + int ret; + + ret = nouveau_bus_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x001100, 0xffffffff); + nv_wr32(priv, 0x001140, 0x00010008); + return 0; +} + +static int +nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_bus_priv *priv; + int ret; + + ret = nouveau_bus_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->intr = nv50_bus_intr; + return 0; +} + +struct nouveau_oclass +nv50_bus_oclass = { + .handle = NV_SUBDEV(BUS, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_bus_ctor, + .dtor = _nouveau_bus_dtor, + .init = nv50_bus_init, + .fini = _nouveau_bus_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c new file mode 100644 index 0000000..b192d62 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012 Nouveau Community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres <martin.peres@labri.fr> + * Ben Skeggs + */ + +#include <subdev/bus.h> + +struct nvc0_bus_priv { + struct nouveau_bus base; +}; + +static void +nvc0_bus_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_bus *pbus = nouveau_bus(subdev); + u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140); + + if (stat & 0x0000000e) { + u32 addr = nv_rd32(pbus, 0x009084); + u32 data = nv_rd32(pbus, 0x009088); + + nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n", + (addr & 0x00000002) ? "write" : "read", data, + (addr & 0x00fffffc), + (stat & 0x00000002) ? "!ENGINE " : "", + (stat & 0x00000004) ? "IBUS " : "", + (stat & 0x00000008) ? "TIMEOUT " : ""); + + nv_wr32(pbus, 0x009084, 0x00000000); + nv_wr32(pbus, 0x001100, (stat & 0x0000000e)); + stat &= ~0x0000000e; + } + + if (stat) { + nv_error(pbus, "unknown intr 0x%08x\n", stat); + nv_mask(pbus, 0x001140, stat, 0x00000000); + } +} + +static int +nvc0_bus_init(struct nouveau_object *object) +{ + struct nvc0_bus_priv *priv = (void *)object; + int ret; + + ret = nouveau_bus_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x001100, 0xffffffff); + nv_wr32(priv, 0x001140, 0x0000000e); + return 0; +} + +static int +nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_bus_priv *priv; + int ret; + + ret = nouveau_bus_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->intr = nvc0_bus_intr; + return 0; +} + +struct nouveau_oclass +nvc0_bus_oclass = { + .handle = NV_SUBDEV(BUS, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_bus_ctor, + .dtor = _nouveau_bus_dtor, + .init = nvc0_bus_init, + .fini = _nouveau_bus_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c new file mode 100644 index 0000000..b7fd115 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c @@ -0,0 +1,359 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/clock.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +#include "pll.h" + +struct nv04_clock_priv { + struct nouveau_clock base; +}; + +static int +powerctrl_1_shift(int chip_version, int reg) +{ + int shift = -4; + + if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20) + return shift; + + switch (reg) { + case 0x680520: + shift += 4; + case 0x680508: + shift += 4; + case 0x680504: + shift += 4; + case 0x680500: + shift += 4; + } + + /* + * the shift for vpll regs is only used for nv3x chips with a single + * stage pll + */ + if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 || + chip_version == 0x36 || chip_version >= 0x40)) + shift = -4; + + return shift; +} + +static void +setPLL_single(struct nv04_clock_priv *priv, u32 reg, + struct nouveau_pll_vals *pv) +{ + int chip_version = nouveau_bios(priv)->version.chip; + uint32_t oldpll = nv_rd32(priv, reg); + int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff; + uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1; + uint32_t saved_powerctrl_1 = 0; + int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg); + + if (oldpll == pll) + return; /* already set */ + + if (shift_powerctrl_1 >= 0) { + saved_powerctrl_1 = nv_rd32(priv, 0x001584); + nv_wr32(priv, 0x001584, + (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) | + 1 << shift_powerctrl_1); + } + + if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1)) + /* upclock -- write new post divider first */ + nv_wr32(priv, reg, pv->log2P << 16 | (oldpll & 0xffff)); + else + /* downclock -- write new NM first */ + nv_wr32(priv, reg, (oldpll & 0xffff0000) | pv->NM1); + + if (chip_version < 0x17 && chip_version != 0x11) + /* wait a bit on older chips */ + msleep(64); + nv_rd32(priv, reg); + + /* then write the other half as well */ + nv_wr32(priv, reg, pll); + + if (shift_powerctrl_1 >= 0) + nv_wr32(priv, 0x001584, saved_powerctrl_1); +} + +static uint32_t +new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580) +{ + bool head_a = (reg1 == 0x680508); + + if (ss) /* single stage pll mode */ + ramdac580 |= head_a ? 0x00000100 : 0x10000000; + else + ramdac580 &= head_a ? 0xfffffeff : 0xefffffff; + + return ramdac580; +} + +static void +setPLL_double_highregs(struct nv04_clock_priv *priv, u32 reg1, + struct nouveau_pll_vals *pv) +{ + int chip_version = nouveau_bios(priv)->version.chip; + bool nv3035 = chip_version == 0x30 || chip_version == 0x35; + uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70); + uint32_t oldpll1 = nv_rd32(priv, reg1); + uint32_t oldpll2 = !nv3035 ? 
nv_rd32(priv, reg2) : 0; + uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1; + uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2; + uint32_t oldramdac580 = 0, ramdac580 = 0; + bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */ + uint32_t saved_powerctrl_1 = 0, savedc040 = 0; + int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1); + + /* model specific additions to generic pll1 and pll2 set up above */ + if (nv3035) { + pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 | + (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4; + pll2 = 0; + } + if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */ + oldramdac580 = nv_rd32(priv, 0x680580); + ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580); + if (oldramdac580 != ramdac580) + oldpll1 = ~0; /* force mismatch */ + if (single_stage) + /* magic value used by nvidia in single stage mode */ + pll2 |= 0x011f; + } + if (chip_version > 0x70) + /* magic bits set by the blob (but not the bios) on g71-73 */ + pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28; + + if (oldpll1 == pll1 && oldpll2 == pll2) + return; /* already set */ + + if (shift_powerctrl_1 >= 0) { + saved_powerctrl_1 = nv_rd32(priv, 0x001584); + nv_wr32(priv, 0x001584, + (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) | + 1 << shift_powerctrl_1); + } + + if (chip_version >= 0x40) { + int shift_c040 = 14; + + switch (reg1) { + case 0x680504: + shift_c040 += 2; + case 0x680500: + shift_c040 += 2; + case 0x680520: + shift_c040 += 2; + case 0x680508: + shift_c040 += 2; + } + + savedc040 = nv_rd32(priv, 0xc040); + if (shift_c040 != 14) + nv_wr32(priv, 0xc040, savedc040 & ~(3 << shift_c040)); + } + + if (oldramdac580 != ramdac580) + nv_wr32(priv, 0x680580, ramdac580); + + if (!nv3035) + nv_wr32(priv, reg2, pll2); + nv_wr32(priv, reg1, pll1); + + if (shift_powerctrl_1 >= 0) + nv_wr32(priv, 0x001584, saved_powerctrl_1); + if (chip_version >= 0x40) + nv_wr32(priv, 0xc040, savedc040); +} + +static void +setPLL_double_lowregs(struct nv04_clock_priv *priv, u32 NMNMreg, + struct nouveau_pll_vals *pv) +{ + /* When setting PLLs, there is a merry game of disabling and enabling + * various bits of hardware during the process. This function is a + * synthesis of six nv4x traces, nearly each card doing a subtly + * different thing. With luck all the necessary bits for each card are + * combined herein. Without luck it deviates from each card's formula + * so as to not work on any :) + */ + + uint32_t Preg = NMNMreg - 4; + bool mpll = Preg == 0x4020; + uint32_t oldPval = nv_rd32(priv, Preg); + uint32_t NMNM = pv->NM2 << 16 | pv->NM1; + uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) | + 0xc << 28 | pv->log2P << 16; + uint32_t saved4600 = 0; + /* some cards have different maskc040s */ + uint32_t maskc040 = ~(3 << 14), savedc040; + bool single_stage = !pv->NM2 || pv->N2 == pv->M2; + + if (nv_rd32(priv, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval) + return; + + if (Preg == 0x4000) + maskc040 = ~0x333; + if (Preg == 0x4058) + maskc040 = ~(0xc << 24); + + if (mpll) { + struct nvbios_pll info; + uint8_t Pval2; + + if (nvbios_pll_parse(nouveau_bios(priv), Preg, &info)) + return; + + Pval2 = pv->log2P + info.bias_p; + if (Pval2 > info.max_p) + Pval2 = info.max_p; + Pval |= 1 << 28 | Pval2 << 20; + + saved4600 = nv_rd32(priv, 0x4600); + nv_wr32(priv, 0x4600, saved4600 | 8 << 28); + } + if (single_stage) + Pval |= mpll ? 
1 << 12 : 1 << 8; + + nv_wr32(priv, Preg, oldPval | 1 << 28); + nv_wr32(priv, Preg, Pval & ~(4 << 28)); + if (mpll) { + Pval |= 8 << 20; + nv_wr32(priv, 0x4020, Pval & ~(0xc << 28)); + nv_wr32(priv, 0x4038, Pval & ~(0xc << 28)); + } + + savedc040 = nv_rd32(priv, 0xc040); + nv_wr32(priv, 0xc040, savedc040 & maskc040); + + nv_wr32(priv, NMNMreg, NMNM); + if (NMNMreg == 0x4024) + nv_wr32(priv, 0x403c, NMNM); + + nv_wr32(priv, Preg, Pval); + if (mpll) { + Pval &= ~(8 << 20); + nv_wr32(priv, 0x4020, Pval); + nv_wr32(priv, 0x4038, Pval); + nv_wr32(priv, 0x4600, saved4600); + } + + nv_wr32(priv, 0xc040, savedc040); + + if (mpll) { + nv_wr32(priv, 0x4020, Pval & ~(1 << 28)); + nv_wr32(priv, 0x4038, Pval & ~(1 << 28)); + } +} + +int +nv04_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) +{ + struct nv04_clock_priv *priv = (void *)clk; + struct nouveau_pll_vals pv; + struct nvbios_pll info; + int ret; + + ret = nvbios_pll_parse(nouveau_bios(priv), type > 0x405c ? + type : type - 4, &info); + if (ret) + return ret; + + ret = clk->pll_calc(clk, &info, freq, &pv); + if (!ret) + return ret; + + return clk->pll_prog(clk, type, &pv); +} + +int +nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, + int clk, struct nouveau_pll_vals *pv) +{ + int N1, M1, N2, M2, P; + int ret = nv04_pll_calc(clock, info, clk, &N1, &M1, &N2, &M2, &P); + if (ret) { + pv->refclk = info->refclk; + pv->N1 = N1; + pv->M1 = M1; + pv->N2 = N2; + pv->M2 = M2; + pv->log2P = P; + } + return ret; +} + +int +nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1, + struct nouveau_pll_vals *pv) +{ + struct nv04_clock_priv *priv = (void *)clk; + int cv = nouveau_bios(clk)->version.chip; + + if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || + cv >= 0x40) { + if (reg1 > 0x405c) + setPLL_double_highregs(priv, reg1, pv); + else + setPLL_double_lowregs(priv, reg1, pv); + } else + setPLL_single(priv, reg1, pv); + + return 0; +} + +static int +nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_clock_priv *priv; + int ret; + + ret = nouveau_clock_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.pll_set = nv04_clock_pll_set; + priv->base.pll_calc = nv04_clock_pll_calc; + priv->base.pll_prog = nv04_clock_pll_prog; + return 0; +} + +struct nouveau_oclass +nv04_clock_oclass = { + .handle = NV_SUBDEV(CLOCK, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_clock_ctor, + .dtor = _nouveau_clock_dtor, + .init = _nouveau_clock_init, + .fini = _nouveau_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c new file mode 100644 index 0000000..a4b2b7e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c @@ -0,0 +1,59 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/clock.h> + +struct nv40_clock_priv { + struct nouveau_clock base; +}; + +static int +nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_clock_priv *priv; + int ret; + + ret = nouveau_clock_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.pll_set = nv04_clock_pll_set; + priv->base.pll_calc = nv04_clock_pll_calc; + priv->base.pll_prog = nv04_clock_pll_prog; + return 0; +} + +struct nouveau_oclass +nv40_clock_oclass = { + .handle = NV_SUBDEV(CLOCK, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_clock_ctor, + .dtor = _nouveau_clock_dtor, + .init = _nouveau_clock_init, + .fini = _nouveau_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c new file mode 100644 index 0000000..f4147f6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c @@ -0,0 +1,106 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/clock.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +#include "pll.h" + +struct nv50_clock_priv { + struct nouveau_clock base; +}; + +static int +nv50_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) +{ + struct nv50_clock_priv *priv = (void *)clk; + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_pll info; + int N1, M1, N2, M2, P; + int ret; + + ret = nvbios_pll_parse(bios, type, &info); + if (ret) { + nv_error(clk, "failed to retrieve pll data, %d\n", ret); + return ret; + } + + ret = nv04_pll_calc(clk, &info, freq, &N1, &M1, &N2, &M2, &P); + if (!ret) { + nv_error(clk, "failed pll calculation\n"); + return ret; + } + + switch (info.type) { + case PLL_VPLL0: + case PLL_VPLL1: + nv_wr32(priv, info.reg + 0, 0x10000611); + nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1); + nv_mask(priv, info.reg + 8, 0x7fff00ff, (P << 28) | + (M2 << 16) | N2); + break; + case PLL_MEMORY: + nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) | + (info.bias_p << 19) | + (P << 16)); + nv_wr32(priv, info.reg + 4, (N1 << 8) | M1); + break; + default: + nv_mask(priv, info.reg + 0, 0x00070000, (P << 16)); + nv_wr32(priv, info.reg + 4, (N1 << 8) | M1); + break; + } + + return 0; +} + +static int +nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_clock_priv *priv; + int ret; + + ret = nouveau_clock_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.pll_set = nv50_clock_pll_set; + priv->base.pll_calc = nv04_clock_pll_calc; + return 0; +} + +struct nouveau_oclass +nv50_clock_oclass = { + .handle = NV_SUBDEV(CLOCK, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_clock_ctor, + .dtor = _nouveau_clock_dtor, + .init = _nouveau_clock_init, + .fini = _nouveau_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c new file mode 100644 index 0000000..9068c98 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c @@ -0,0 +1,114 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/clock.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +#include "pll.h" + +struct nva3_clock_priv { + struct nouveau_clock base; +}; + +static int +nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) +{ + struct nva3_clock_priv *priv = (void *)clk; + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_pll info; + int N, fN, M, P; + int ret; + + ret = nvbios_pll_parse(bios, type, &info); + if (ret) + return ret; + + ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P); + if (ret < 0) + return ret; + + switch (info.type) { + case PLL_VPLL0: + case PLL_VPLL1: + nv_wr32(priv, info.reg + 0, 0x50000610); + nv_mask(priv, info.reg + 4, 0x003fffff, + (P << 16) | (M << 8) | N); + nv_wr32(priv, info.reg + 8, fN); + break; + default: + nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq); + ret = -EINVAL; + break; + } + + return ret; +} + +int +nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, + int clk, struct nouveau_pll_vals *pv) +{ + int ret, N, M, P; + + ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P); + + if (ret > 0) { + pv->refclk = info->refclk; + pv->N1 = N; + pv->M1 = M; + pv->log2P = P; + } + return ret; +} + + +static int +nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nva3_clock_priv *priv; + int ret; + + ret = nouveau_clock_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.pll_set = nva3_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; + return 0; +} + +struct nouveau_oclass +nva3_clock_oclass = { + .handle = NV_SUBDEV(CLOCK, 0xa3), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nva3_clock_ctor, + .dtor = _nouveau_clock_dtor, + .init = _nouveau_clock_init, + .fini = _nouveau_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c new file mode 100644 index 0000000..7c96262 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c @@ -0,0 +1,97 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/clock.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +#include "pll.h" + +struct nvc0_clock_priv { + struct nouveau_clock base; +}; + +static int +nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) +{ + struct nvc0_clock_priv *priv = (void *)clk; + struct nouveau_bios *bios = nouveau_bios(priv); + struct nvbios_pll info; + int N, fN, M, P; + int ret; + + ret = nvbios_pll_parse(bios, type, &info); + if (ret) + return ret; + + ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P); + if (ret < 0) + return ret; + + switch (info.type) { + case PLL_VPLL0: + case PLL_VPLL1: + case PLL_VPLL2: + case PLL_VPLL3: + nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100); + nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M); + nv_wr32(priv, info.reg + 0x10, fN << 16); + break; + default: + nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_clock_priv *priv; + int ret; + + ret = nouveau_clock_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.pll_set = nvc0_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; + return 0; +} + +struct nouveau_oclass +nvc0_clock_oclass = { + .handle = NV_SUBDEV(CLOCK, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_clock_ctor, + .dtor = _nouveau_clock_dtor, + .init = _nouveau_clock_init, + .fini = _nouveau_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h new file mode 100644 index 0000000..ef2c007 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h @@ -0,0 +1,9 @@ +#ifndef __NOUVEAU_PLL_H__ +#define __NOUVEAU_PLL_H__ + +int nv04_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq, + int *N1, int *M1, int *N2, int *M2, int *P); +int nva3_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq, + int *N, int *fN, int *M, int *P); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c new file mode 100644 index 0000000..a2ab6d0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c @@ -0,0 +1,242 @@ +/* + * Copyright 1993-2003 NVIDIA, Corporation + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <subdev/clock.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +#include "pll.h" + +static int +getMNP_single(struct nouveau_clock *clock, struct nvbios_pll *info, int clk, + int *pN, int *pM, int *pP) +{ + /* Find M, N and P for a single stage PLL + * + * Note that some bioses (NV3x) have lookup tables of precomputed MNP + * values, but we're too lazy to use those atm + * + * "clk" parameter in kHz + * returns calculated clock + */ + int cv = nouveau_bios(clock)->version.chip; + int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq; + int minM = info->vco1.min_m, maxM = info->vco1.max_m; + int minN = info->vco1.min_n, maxN = info->vco1.max_n; + int minU = info->vco1.min_inputfreq; + int maxU = info->vco1.max_inputfreq; + int minP = info->min_p; + int maxP = info->max_p_usable; + int crystal = info->refclk; + int M, N, thisP, P; + int clkP, calcclk; + int delta, bestdelta = INT_MAX; + int bestclk = 0; + + /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */ + /* possibly correlated with introduction of 27MHz crystal */ + if (cv < 0x17 || cv == 0x1a || cv == 0x20) { + if (clk > 250000) + maxM = 6; + if (clk > 340000) + maxM = 2; + } else if (cv < 0x40) { + if (clk > 150000) + maxM = 6; + if (clk > 200000) + maxM = 4; + if (clk > 340000) + maxM = 2; + } + + P = 1 << maxP; + if ((clk * P) < minvco) { + minvco = clk * maxP; + maxvco = minvco * 2; + } + + if (clk + clk/200 > maxvco) /* +0.5% */ + maxvco = clk + clk/200; + + /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */ + for (thisP = minP; thisP <= maxP; thisP++) { + P = 1 << thisP; + clkP = clk * P; + + if (clkP < minvco) + continue; + if (clkP > maxvco) + return bestclk; + + for (M = minM; M <= maxM; M++) { + if (crystal/M < minU) + return bestclk; + if (crystal/M > maxU) + continue; + + /* add crystal/2 to round better */ + N = (clkP * M + crystal/2) / crystal; + + if (N < minN) + continue; + if (N > maxN) + break; + + /* more rounding additions */ + calcclk = ((N * crystal + P/2) / P + M/2) / M; + delta = abs(calcclk - clk); + /* we do an exhaustive search rather than terminating + * on an optimality condition... 
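+	 * every in-range (P, M, N) candidate is scored by the absolute
+	 * deviation of its output clock from the requested clock, and
+	 * the closest match is kept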
+ */ + if (delta < bestdelta) { + bestdelta = delta; + bestclk = calcclk; + *pN = N; + *pM = M; + *pP = thisP; + if (delta == 0) /* except this one */ + return bestclk; + } + } + } + + return bestclk; +} + +static int +getMNP_double(struct nouveau_clock *clock, struct nvbios_pll *info, int clk, + int *pN1, int *pM1, int *pN2, int *pM2, int *pP) +{ + /* Find M, N and P for a two stage PLL + * + * Note that some bioses (NV30+) have lookup tables of precomputed MNP + * values, but we're too lazy to use those atm + * + * "clk" parameter in kHz + * returns calculated clock + */ + int chip_version = nouveau_bios(clock)->version.chip; + int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq; + int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq; + int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq; + int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq; + int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m; + int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n; + int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m; + int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n; + int maxlog2P = info->max_p_usable; + int crystal = info->refclk; + bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2); + int M1, N1, M2, N2, log2P; + int clkP, calcclk1, calcclk2, calcclkout; + int delta, bestdelta = INT_MAX; + int bestclk = 0; + + int vco2 = (maxvco2 - maxvco2/200) / 2; + for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++) + ; + clkP = clk << log2P; + + if (maxvco2 < clk + clk/200) /* +0.5% */ + maxvco2 = clk + clk/200; + + for (M1 = minM1; M1 <= maxM1; M1++) { + if (crystal/M1 < minU1) + return bestclk; + if (crystal/M1 > maxU1) + continue; + + for (N1 = minN1; N1 <= maxN1; N1++) { + calcclk1 = crystal * N1 / M1; + if (calcclk1 < minvco1) + continue; + if (calcclk1 > maxvco1) + break; + + for (M2 = minM2; M2 <= maxM2; M2++) { + if (calcclk1/M2 < minU2) + break; + if (calcclk1/M2 > maxU2) + continue; + + /* add calcclk1/2 to round better */ + N2 = (clkP * M2 + calcclk1/2) / calcclk1; + if (N2 < minN2) + continue; + if (N2 > maxN2) + break; + + if (!fixedgain2) { + if (chip_version < 0x60) + if (N2/M2 < 4 || N2/M2 > 10) + continue; + + calcclk2 = calcclk1 * N2 / M2; + if (calcclk2 < minvco2) + break; + if (calcclk2 > maxvco2) + continue; + } else + calcclk2 = calcclk1; + + calcclkout = calcclk2 >> log2P; + delta = abs(calcclkout - clk); + /* we do an exhaustive search rather than terminating + * on an optimality condition... + */ + if (delta < bestdelta) { + bestdelta = delta; + bestclk = calcclkout; + *pN1 = N1; + *pM1 = M1; + *pN2 = N2; + *pM2 = M2; + *pP = log2P; + if (delta == 0) /* except this one */ + return bestclk; + } + } + } + } + + return bestclk; +} + +int +nv04_pll_calc(struct nouveau_clock *clk, struct nvbios_pll *info, u32 freq, + int *N1, int *M1, int *N2, int *M2, int *P) +{ + int ret; + + if (!info->vco2.max_freq) { + ret = getMNP_single(clk, info, freq, N1, M1, P); + *N2 = 1; + *M2 = 1; + } else { + ret = getMNP_double(clk, info, freq, N1, M1, N2, M2, P); + } + + if (!ret) + nv_error(clk, "unable to compute acceptable pll values\n"); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c new file mode 100644 index 0000000..eed5c16 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c @@ -0,0 +1,80 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/clock.h> +#include <subdev/bios.h> +#include <subdev/bios/pll.h> + +#include "pll.h" + +int +nva3_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, + u32 freq, int *pN, int *pfN, int *pM, int *P) +{ + u32 best_err = ~0, err; + int M, lM, hM, N, fN; + + *P = info->vco1.max_freq / freq; + if (*P > info->max_p) + *P = info->max_p; + if (*P < info->min_p) + *P = info->min_p; + + lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq; + lM = max(lM, (int)info->vco1.min_m); + hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq; + hM = min(hM, (int)info->vco1.max_m); + + for (M = lM; M <= hM; M++) { + u32 tmp = freq * *P * M; + N = tmp / info->refclk; + fN = tmp % info->refclk; + if (!pfN && fN >= info->refclk / 2) + N++; + + if (N < info->vco1.min_n) + continue; + if (N > info->vco1.max_n) + break; + + err = abs(freq - (info->refclk * N / M / *P)); + if (err < best_err) { + best_err = err; + *pN = N; + *pM = M; + } + + if (pfN) { + *pfN = (((fN << 13) / info->refclk) - 4096) & 0xffff; + return freq; + } + } + + if (unlikely(best_err == ~0)) { + nv_error(clock, "unable to find matching pll values\n"); + return -EINVAL; + } + + return info->refclk * *pN / *pM / *P; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c new file mode 100644 index 0000000..5a07a39 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c @@ -0,0 +1,69 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/option.h> + +#include <subdev/devinit.h> +#include <subdev/bios.h> +#include <subdev/bios/init.h> + +int +nouveau_devinit_init(struct nouveau_devinit *devinit) +{ + int ret = nouveau_subdev_init(&devinit->base); + if (ret) + return ret; + + return nvbios_init(&devinit->base, devinit->post); +} + +int +nouveau_devinit_fini(struct nouveau_devinit *devinit, bool suspend) +{ + /* force full reinit on resume */ + if (suspend) + devinit->post = true; + + return nouveau_subdev_fini(&devinit->base, suspend); +} + +int +nouveau_devinit_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + int size, void **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_devinit *devinit; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT", + "init", size, pobject); + devinit = *pobject; + if (ret) + return ret; + + devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h new file mode 100644 index 0000000..6b56a0f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#define NV04_PFB_BOOT_0 0x00100000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003 +# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004 +# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028 +# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100 +# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000 +#define NV04_PFB_DEBUG_0 0x00100080 +# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001 +# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010 +# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00 +# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000 +# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000 +# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000 +# define NV04_PFB_DEBUG_0_CASOE 0x00100000 +# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000 +# define NV04_PFB_DEBUG_0_REFINC 0x20000000 +# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000 +#define NV04_PFB_CFG0 0x00100200 +# define NV04_PFB_CFG0_SCRAMBLE 0x20000000 +#define NV04_PFB_CFG1 0x00100204 +#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) + +#define NV10_PFB_REFCTRL 0x00100210 +# define NV10_PFB_REFCTRL_VALID_1 (1 << 31) + +static inline struct io_mapping * +fbmem_init(struct pci_dev *pdev) +{ + return io_mapping_create_wc(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); +} + +static inline void +fbmem_fini(struct io_mapping *fb) +{ + io_mapping_free(fb); +} + +static inline u32 +fbmem_peek(struct io_mapping *fb, u32 off) +{ + u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK); + u32 val = ioread32(p + (off & ~PAGE_MASK)); + io_mapping_unmap_atomic(p); + return val; +} + +static inline void +fbmem_poke(struct io_mapping *fb, u32 off, u32 val) +{ + u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK); + iowrite32(val, p + (off & ~PAGE_MASK)); + wmb(); + io_mapping_unmap_atomic(p); +} + +static inline bool +fbmem_readback(struct io_mapping *fb, u32 off, u32 val) +{ + fbmem_poke(fb, off, val); + return val == fbmem_peek(fb, off); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c new file mode 100644 index 0000000..7a72d93 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/devinit.h> +#include <subdev/vga.h> + +#include "fbmem.h" + +struct nv04_devinit_priv { + struct nouveau_devinit base; + int owner; +}; + +static void +nv04_devinit_meminit(struct nouveau_devinit *devinit) +{ + struct nv04_devinit_priv *priv = (void *)devinit; + u32 patt = 0xdeadbeef; + struct io_mapping *fb; + int i; + + /* Map the framebuffer aperture */ + fb = fbmem_init(nv_device(priv)->pdev); + if (!fb) { + nv_error(priv, "failed to map fb\n"); + return; + } + + /* Sequencer and refresh off */ + nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20); + nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF); + + nv_mask(priv, NV04_PFB_BOOT_0, ~0, + NV04_PFB_BOOT_0_RAM_AMOUNT_16MB | + NV04_PFB_BOOT_0_RAM_WIDTH_128 | + NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT); + + for (i = 0; i < 4; i++) + fbmem_poke(fb, 4 * i, patt); + + fbmem_poke(fb, 0x400000, patt + 1); + + if (fbmem_peek(fb, 0) == patt + 1) { + nv_mask(priv, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_TYPE, + NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT); + nv_mask(priv, NV04_PFB_DEBUG_0, + NV04_PFB_DEBUG_0_REFRESH_OFF, 0); + + for (i = 0; i < 4; i++) + fbmem_poke(fb, 4 * i, patt); + + if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff)) + nv_mask(priv, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_WIDTH_128 | + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + } else + if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) { + nv_mask(priv, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_WIDTH_128 | + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); + } else + if (fbmem_peek(fb, 0) != patt) { + if (fbmem_readback(fb, 0x800000, patt)) + nv_mask(priv, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + else + nv_mask(priv, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); + + nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, + NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT); + } else + if (!fbmem_readback(fb, 0x800000, patt)) { + nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + + } + + /* Refresh on, sequencer on */ + nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); + nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20); + fbmem_fini(fb); +} + +static int +nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_devinit_priv *priv; + int ret; + + ret = nouveau_devinit_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.meminit = nv04_devinit_meminit; + priv->owner = -1; + return 0; +} + +void +nv04_devinit_dtor(struct nouveau_object *object) +{ + struct nv04_devinit_priv *priv = (void *)object; + + /* restore vga owner saved at first init, and lock crtc regs */ + nv_wrvgaowner(priv, priv->owner); + nv_lockvgac(priv, true); + + nouveau_devinit_destroy(&priv->base); +} + +int 
+nv04_devinit_init(struct nouveau_object *object) +{ + struct nv04_devinit_priv *priv = (void *)object; + + if (!priv->base.post) { + u32 htotal = nv_rdvgac(priv, 0, 0x06); + htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8; + htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4; + htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10; + htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11; + if (!htotal) { + nv_info(priv, "adaptor not initialised\n"); + priv->base.post = true; + } + } + + return nouveau_devinit_init(&priv->base); +} + +int +nv04_devinit_fini(struct nouveau_object *object, bool suspend) +{ + struct nv04_devinit_priv *priv = (void *)object; + + /* make i2c busses accessible */ + nv_mask(priv, 0x000200, 0x00000001, 0x00000001); + + /* unlock extended vga crtc regs, and unslave crtcs */ + nv_lockvgac(priv, false); + if (priv->owner < 0) + priv->owner = nv_rdvgaowner(priv); + nv_wrvgaowner(priv, 0); + + return nouveau_devinit_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv04_devinit_oclass = { + .handle = NV_SUBDEV(DEVINIT, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_devinit_ctor, + .dtor = nv04_devinit_dtor, + .init = nv04_devinit_init, + .fini = nv04_devinit_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c new file mode 100644 index 0000000..191447d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/devinit.h> +#include <subdev/bios.h> +#include <subdev/bios/bmp.h> +#include <subdev/vga.h> + +#include "fbmem.h" + +struct nv05_devinit_priv { + struct nouveau_devinit base; + u8 owner; +}; + +static void +nv05_devinit_meminit(struct nouveau_devinit *devinit) +{ + static const u8 default_config_tab[][2] = { + { 0x24, 0x00 }, + { 0x28, 0x00 }, + { 0x24, 0x01 }, + { 0x1f, 0x00 }, + { 0x0f, 0x00 }, + { 0x17, 0x00 }, + { 0x06, 0x00 }, + { 0x00, 0x00 } + }; + struct nv05_devinit_priv *priv = (void *)devinit; + struct nouveau_bios *bios = nouveau_bios(priv); + struct io_mapping *fb; + u32 patt = 0xdeadbeef; + u16 data; + u8 strap, ramcfg[2]; + int i, v; + + /* Map the framebuffer aperture */ + fb = fbmem_init(nv_device(priv)->pdev); + if (!fb) { + nv_error(priv, "failed to map fb\n"); + return; + } + + strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2; + if ((data = bmp_mem_init_table(bios))) { + ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0); + ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1); + } else { + ramcfg[0] = default_config_tab[strap][0]; + ramcfg[1] = default_config_tab[strap][1]; + } + + /* Sequencer off */ + nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20); + + if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE) + goto out; + + nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); + + /* If present load the hardcoded scrambling table */ + if (data) { + for (i = 0, data += 0x10; i < 8; i++, data += 4) { + u32 scramble = nv_ro32(bios, data); + nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble); + } + } + + /* Set memory type/width/length defaults depending on the straps */ + nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]); + + if (ramcfg[1] & 0x80) + nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE); + + nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20); + nv_mask(priv, NV04_PFB_CFG1, 0, 1); + + /* Probe memory bus width */ + for (i = 0; i < 4; i++) + fbmem_poke(fb, 4 * i, patt); + + if (fbmem_peek(fb, 0xc) != patt) + nv_mask(priv, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_WIDTH_128, 0); + + /* Probe memory length */ + v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT; + + if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB && + (!fbmem_readback(fb, 0x1000000, ++patt) || + !fbmem_readback(fb, 0, ++patt))) + nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_16MB); + + if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB && + !fbmem_readback(fb, 0x800000, ++patt)) + nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + + if (!fbmem_readback(fb, 0x400000, ++patt)) + nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); + +out: + /* Sequencer on */ + nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20); + fbmem_fini(fb); +} + +static int +nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv05_devinit_priv *priv; + int ret; + + ret = nouveau_devinit_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.meminit = nv05_devinit_meminit; + return 0; +} + +struct nouveau_oclass +nv05_devinit_oclass = { + .handle = NV_SUBDEV(DEVINIT, 0x05), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv05_devinit_ctor, + .dtor = nv04_devinit_dtor, + .init = nv04_devinit_init, + .fini = nv04_devinit_fini, + }, +}; diff --git 
a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c new file mode 100644 index 0000000..eb76ffa --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/devinit.h> +#include <subdev/vga.h> + +#include "fbmem.h" + +struct nv10_devinit_priv { + struct nouveau_devinit base; + u8 owner; +}; + +static void +nv10_devinit_meminit(struct nouveau_devinit *devinit) +{ + struct nv10_devinit_priv *priv = (void *)devinit; + const int mem_width[] = { 0x10, 0x00, 0x20 }; + const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2; + uint32_t patt = 0xdeadbeef; + struct io_mapping *fb; + int i, j, k; + + /* Map the framebuffer aperture */ + fb = fbmem_init(nv_device(priv)->pdev); + if (!fb) { + nv_error(priv, "failed to map fb\n"); + return; + } + + nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1); + + /* Probe memory bus width */ + for (i = 0; i < mem_width_count; i++) { + nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]); + + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + fbmem_poke(fb, 0x1c, 0); + + fbmem_poke(fb, 0x1c, patt); + fbmem_poke(fb, 0x3c, 0); + + if (fbmem_peek(fb, 0x1c) == patt) + goto mem_width_found; + } + } + +mem_width_found: + patt <<= 1; + + /* Probe amount of installed memory */ + for (i = 0; i < 4; i++) { + int off = nv_rd32(priv, 0x10020c) - 0x100000; + + fbmem_poke(fb, off, patt); + fbmem_poke(fb, 0, 0); + + fbmem_peek(fb, 0); + fbmem_peek(fb, 0); + fbmem_peek(fb, 0); + fbmem_peek(fb, 0); + + if (fbmem_peek(fb, off) == patt) + goto amount_found; + } + + /* IC missing - disable the upper half memory space. 
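+	 * (none of the four probe attempts read the test pattern back
+	 * from the top of the reported amount, so clear bit 12 of
+	 * NV04_PFB_CFG0 to drop the unpopulated upper half)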
*/ + nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0); + +amount_found: + fbmem_fini(fb); +} + +static int +nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv10_devinit_priv *priv; + int ret; + + ret = nouveau_devinit_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.meminit = nv10_devinit_meminit; + return 0; +} + +struct nouveau_oclass +nv10_devinit_oclass = { + .handle = NV_SUBDEV(DEVINIT, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_devinit_ctor, + .dtor = nv04_devinit_dtor, + .init = nv04_devinit_init, + .fini = nv04_devinit_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c new file mode 100644 index 0000000..5b2ba63 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c @@ -0,0 +1,58 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/devinit.h> +#include <subdev/vga.h> + +struct nv1a_devinit_priv { + struct nouveau_devinit base; + u8 owner; +}; + +static int +nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv1a_devinit_priv *priv; + int ret; + + ret = nouveau_devinit_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nv1a_devinit_oclass = { + .handle = NV_SUBDEV(DEVINIT, 0x1a), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv1a_devinit_ctor, + .dtor = nv04_devinit_dtor, + .init = nv04_devinit_init, + .fini = nv04_devinit_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c new file mode 100644 index 0000000..eb32e99 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/devinit.h> +#include <subdev/vga.h> + +#include "fbmem.h" + +struct nv20_devinit_priv { + struct nouveau_devinit base; + u8 owner; +}; + +static void +nv20_devinit_meminit(struct nouveau_devinit *devinit) +{ + struct nv20_devinit_priv *priv = (void *)devinit; + struct nouveau_device *device = nv_device(priv); + uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900); + uint32_t amount, off; + struct io_mapping *fb; + + /* Map the framebuffer aperture */ + fb = fbmem_init(nv_device(priv)->pdev); + if (!fb) { + nv_error(priv, "failed to map fb\n"); + return; + } + + nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1); + + /* Allow full addressing */ + nv_mask(priv, NV04_PFB_CFG0, 0, mask); + + amount = nv_rd32(priv, 0x10020c); + for (off = amount; off > 0x2000000; off -= 0x2000000) + fbmem_poke(fb, off - 4, off); + + amount = nv_rd32(priv, 0x10020c); + if (amount != fbmem_peek(fb, amount - 4)) + /* IC missing - disable the upper half memory space. */ + nv_mask(priv, NV04_PFB_CFG0, mask, 0); + + fbmem_fini(fb); +} + +static int +nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_devinit_priv *priv; + int ret; + + ret = nouveau_devinit_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.meminit = nv20_devinit_meminit; + return 0; +} + +struct nouveau_oclass +nv20_devinit_oclass = { + .handle = NV_SUBDEV(DEVINIT, 0x20), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv20_devinit_ctor, + .dtor = nv04_devinit_dtor, + .init = nv04_devinit_init, + .fini = nv04_devinit_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c new file mode 100644 index 0000000..4a85778 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c @@ -0,0 +1,121 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/disp.h> +#include <subdev/bios/init.h> +#include <subdev/devinit.h> +#include <subdev/vga.h> + +struct nv50_devinit_priv { + struct nouveau_devinit base; +}; + +static int +nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_devinit_priv *priv; + int ret; + + ret = nouveau_devinit_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static void +nv50_devinit_dtor(struct nouveau_object *object) +{ + struct nv50_devinit_priv *priv = (void *)object; + nouveau_devinit_destroy(&priv->base); +} + +static int +nv50_devinit_init(struct nouveau_object *object) +{ + struct nouveau_bios *bios = nouveau_bios(object); + struct nv50_devinit_priv *priv = (void *)object; + struct nvbios_outp info; + struct dcb_output outp; + u8 ver = 0xff, hdr, cnt, len; + int ret, i = 0; + + if (!priv->base.post) { + if (!nv_rdvgac(priv, 0, 0x00) && + !nv_rdvgac(priv, 0, 0x1a)) { + nv_info(priv, "adaptor not initialised\n"); + priv->base.post = true; + } + } + + ret = nouveau_devinit_init(&priv->base); + if (ret) + return ret; + + /* if we ran the init tables, we have to execute the first script + * pointer of each dcb entry's display encoder table in order + * to properly initialise each encoder. 
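+	 * (dcb_outp_parse() walks the DCB outputs; for each entry with a
+	 * matching display encoder table, script pointer 0 is executed
+	 * through nvbios_exec())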
+ */ + while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) { + if (nvbios_outp_match(bios, outp.hasht, outp.hashm, + &ver, &hdr, &cnt, &len, &info)) { + struct nvbios_init init = { + .subdev = nv_subdev(priv), + .bios = bios, + .offset = info.script[0], + .outp = &outp, + .crtc = -1, + .execute = 1, + }; + + nvbios_exec(&init); + } + i++; + } + + return 0; +} + +static int +nv50_devinit_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_devinit_priv *priv = (void *)object; + return nouveau_devinit_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv50_devinit_oclass = { + .handle = NV_SUBDEV(DEVINIT, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_devinit_ctor, + .dtor = nv50_devinit_dtor, + .init = nv50_devinit_init, + .fini = nv50_devinit_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c new file mode 100644 index 0000000..d62045f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c @@ -0,0 +1,140 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "subdev/fb.h" +#include "subdev/bios.h" +#include "subdev/bios/bit.h" + +int +nouveau_fb_bios_memtype(struct nouveau_bios *bios) +{ + struct bit_entry M; + u8 ramcfg; + + ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2; + if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) { + u16 table = nv_ro16(bios, M.offset + 3); + u8 version = nv_ro08(bios, table + 0); + u8 header = nv_ro08(bios, table + 1); + u8 record = nv_ro08(bios, table + 2); + u8 entries = nv_ro08(bios, table + 3); + if (table && version == 0x10 && ramcfg < entries) { + u16 entry = table + header + (ramcfg * record); + switch (nv_ro08(bios, entry) & 0x0f) { + case 0: return NV_MEM_TYPE_DDR2; + case 1: return NV_MEM_TYPE_DDR3; + case 2: return NV_MEM_TYPE_GDDR3; + case 3: return NV_MEM_TYPE_GDDR5; + default: + break; + } + + } + } + + return NV_MEM_TYPE_UNKNOWN; +} + +int +nouveau_fb_preinit(struct nouveau_fb *pfb) +{ + static const char *name[] = { + [NV_MEM_TYPE_UNKNOWN] = "unknown", + [NV_MEM_TYPE_STOLEN ] = "stolen system memory", + [NV_MEM_TYPE_SGRAM ] = "SGRAM", + [NV_MEM_TYPE_SDRAM ] = "SDRAM", + [NV_MEM_TYPE_DDR1 ] = "DDR1", + [NV_MEM_TYPE_DDR2 ] = "DDR2", + [NV_MEM_TYPE_DDR3 ] = "DDR3", + [NV_MEM_TYPE_GDDR2 ] = "GDDR2", + [NV_MEM_TYPE_GDDR3 ] = "GDDR3", + [NV_MEM_TYPE_GDDR4 ] = "GDDR4", + [NV_MEM_TYPE_GDDR5 ] = "GDDR5", + }; + int ret, tags; + + tags = pfb->ram.init(pfb); + if (tags < 0 || !pfb->ram.size) { + nv_fatal(pfb, "error detecting memory configuration!!\n"); + return (tags < 0) ? tags : -ERANGE; + } + + if (!nouveau_mm_initialised(&pfb->vram)) { + ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1); + if (ret) + return ret; + } + + if (!nouveau_mm_initialised(&pfb->tags)) { + ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1); + if (ret) + return ret; + } + + nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]); + nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20)); + nv_info(pfb, " ZCOMP: %d tags\n", tags); + return 0; +} + +void +nouveau_fb_destroy(struct nouveau_fb *pfb) +{ + int i; + + for (i = 0; i < pfb->tile.regions; i++) + pfb->tile.fini(pfb, i, &pfb->tile.region[i]); + nouveau_mm_fini(&pfb->tags); + nouveau_mm_fini(&pfb->vram); + + nouveau_subdev_destroy(&pfb->base); +} + +void +_nouveau_fb_dtor(struct nouveau_object *object) +{ + struct nouveau_fb *pfb = (void *)object; + nouveau_fb_destroy(pfb); +} +int +nouveau_fb_init(struct nouveau_fb *pfb) +{ + int ret, i; + + ret = nouveau_subdev_init(&pfb->base); + if (ret) + return ret; + + for (i = 0; i < pfb->tile.regions; i++) + pfb->tile.prog(pfb, i, &pfb->tile.region[i]); + + return 0; +} + +int +_nouveau_fb_init(struct nouveau_object *object) +{ + struct nouveau_fb *pfb = (void *)object; + return nouveau_fb_init(pfb); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c new file mode 100644 index 0000000..6e369f8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c @@ -0,0 +1,134 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/fb.h> + +#define NV04_PFB_BOOT_0 0x00100000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003 +# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004 +# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028 +# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100 +# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000 +#define NV04_PFB_CFG0 0x00100200 + +struct nv04_fb_priv { + struct nouveau_fb base; +}; + +bool +nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) +{ + if (!(tile_flags & 0xff00)) + return true; + + return false; +} + +static int +nv04_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0); + if (boot0 & 0x00000100) { + pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2; + pfb->ram.size *= 1024 * 1024; + } else { + switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { + case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: + pfb->ram.size = 32 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: + pfb->ram.size = 16 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: + pfb->ram.size = 8 * 1024 * 1024; + break; + case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: + pfb->ram.size = 4 * 1024 * 1024; + break; + } + } + + if ((boot0 & 0x00000038) <= 0x10) + pfb->ram.type = NV_MEM_TYPE_SGRAM; + else + pfb->ram.type = NV_MEM_TYPE_SDRAM; + return 0; +} + +static int +nv04_fb_init(struct nouveau_object *object) +{ + struct nv04_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows + * nvidia reading PFB_CFG_0, then writing back its original value. 
+ * (which was 0x701114 in this case) + */ + nv_wr32(priv, NV04_PFB_CFG0, 0x1114); + return 0; +} + +static int +nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv04_fb_vram_init; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv04_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv04_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c new file mode 100644 index 0000000..edbbe26 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv10_fb_priv { + struct nouveau_fb base; +}; + +static int +nv10_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 cfg0 = nv_rd32(pfb, 0x100200); + if (cfg0 & 0x00000001) + pfb->ram.type = NV_MEM_TYPE_DDR1; + else + pfb->ram.type = NV_MEM_TYPE_SDRAM; + + pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; + return 0; +} + +void +nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, + u32 flags, struct nouveau_fb_tile *tile) +{ + tile->addr = 0x80000000 | addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; +} + +void +nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) +{ + tile->addr = 0; + tile->limit = 0; + tile->pitch = 0; + tile->zcomp = 0; +} + +void +nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) +{ + nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); + nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); + nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); + nv_rd32(pfb, 0x100240 + (i * 0x10)); +} + +static int +nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv10_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv10_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv10_fb_tile_init; + priv->base.tile.fini = nv10_fb_tile_fini; + priv->base.tile.prog = nv10_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv10_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = _nouveau_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c new file mode 100644 index 0000000..4836684 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv1a_fb_priv { + struct nouveau_fb base; +}; + +static int +nv1a_fb_vram_init(struct nouveau_fb *pfb) +{ + struct pci_dev *bridge; + u32 mem, mib; + + bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); + if (!bridge) { + nv_fatal(pfb, "no bridge device\n"); + return -ENODEV; + } + + if (nv_device(pfb)->chipset == 0x1a) { + pci_read_config_dword(bridge, 0x7c, &mem); + mib = ((mem >> 6) & 31) + 1; + } else { + pci_read_config_dword(bridge, 0x84, &mem); + mib = ((mem >> 4) & 127) + 1; + } + + pfb->ram.type = NV_MEM_TYPE_STOLEN; + pfb->ram.size = mib * 1024 * 1024; + return 0; +} + +static int +nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv1a_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv1a_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv10_fb_tile_init; + priv->base.tile.fini = nv10_fb_tile_fini; + priv->base.tile.prog = nv10_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv1a_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x1a), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv1a_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = _nouveau_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c new file mode 100644 index 0000000..5d14612 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv20_fb_priv { + struct nouveau_fb base; +}; + +int +nv20_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 pbus1218 = nv_rd32(pfb, 0x001218); + + switch (pbus1218 & 0x00000300) { + case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break; + } + pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000); + pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; + + return nv_rd32(pfb, 0x100320); +} + +void +nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, + u32 flags, struct nouveau_fb_tile *tile) +{ + tile->addr = 0x00000001 | addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; + if (flags & 4) { + pfb->tile.comp(pfb, i, size, flags, tile); + tile->addr |= 2; + } +} + +static void +nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, + struct nouveau_fb_tile *tile) +{ + u32 tiles = DIV_ROUND_UP(size, 0x40); + u32 tags = round_up(tiles / pfb->ram.parts, 0x40); + if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { + if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */ + else tile->zcomp = 0x04000000; /* Z24S8 */ + tile->zcomp |= tile->tag->offset; + tile->zcomp |= 0x80000000; /* enable */ +#ifdef __BIG_ENDIAN + tile->zcomp |= 0x08000000; +#endif + } +} + +void +nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) +{ + tile->addr = 0; + tile->limit = 0; + tile->pitch = 0; + tile->zcomp = 0; + nouveau_mm_free(&pfb->tags, &tile->tag); +} + +void +nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) +{ + nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); + nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); + nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); + nv_rd32(pfb, 0x100240 + (i * 0x10)); + nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp); +} + +static int +nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv20_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv20_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv20_fb_tile_init; + priv->base.tile.comp = nv20_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv20_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv20_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x20), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv20_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = _nouveau_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c new file mode 100644 index 0000000..0042ace --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/fb.h> + +struct nv25_fb_priv { + struct nouveau_fb base; +}; + +static void +nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, + struct nouveau_fb_tile *tile) +{ + u32 tiles = DIV_ROUND_UP(size, 0x40); + u32 tags = round_up(tiles / pfb->ram.parts, 0x40); + if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { + if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ + else tile->zcomp = 0x00200000; /* Z24S8 */ + tile->zcomp |= tile->tag->offset; +#ifdef __BIG_ENDIAN + tile->zcomp |= 0x01000000; +#endif + } +} + +static int +nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv25_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv20_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv20_fb_tile_init; + priv->base.tile.comp = nv25_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv20_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv25_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x25), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv25_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = _nouveau_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c new file mode 100644 index 0000000..a7ba0d0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/fb.h> + +struct nv30_fb_priv { + struct nouveau_fb base; +}; + +void +nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, + u32 flags, struct nouveau_fb_tile *tile) +{ + /* for performance, select alternate bank offset for zeta */ + if (!(flags & 4)) { + tile->addr = (0 << 4); + } else { + if (pfb->tile.comp) /* z compression */ + pfb->tile.comp(pfb, i, size, flags, tile); + tile->addr = (1 << 4); + } + + tile->addr |= 0x00000001; /* enable */ + tile->addr |= addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; +} + +static void +nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, + struct nouveau_fb_tile *tile) +{ + u32 tiles = DIV_ROUND_UP(size, 0x40); + u32 tags = round_up(tiles / pfb->ram.parts, 0x40); + if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { + if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ + else tile->zcomp |= 0x02000000; /* Z24S8 */ + tile->zcomp |= ((tile->tag->offset ) >> 6); + tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12; +#ifdef __BIG_ENDIAN + tile->zcomp |= 0x10000000; +#endif + } +} + +static int +calc_bias(struct nv30_fb_priv *priv, int k, int i, int j) +{ + struct nouveau_device *device = nv_device(priv); + int b = (device->chipset > 0x30 ? + nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) : + 0) & 0xf; + + return 2 * (b & 0x8 ? b - 0x10 : b); +} + +static int +calc_ref(struct nv30_fb_priv *priv, int l, int k, int i) +{ + int j, x = 0; + + for (j = 0; j < 4; j++) { + int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j); + + x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j); + } + + return x; +} + +int +nv30_fb_init(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv30_fb_priv *priv = (void *)object; + int ret, i, j; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + /* Init the memory timing regs at 0x10037c/0x1003ac */ + if (device->chipset == 0x30 || + device->chipset == 0x31 || + device->chipset == 0x35) { + /* Related to ROP count */ + int n = (device->chipset == 0x31 ? 
2 : 4); + int l = nv_rd32(priv, 0x1003d0); + + for (i = 0; i < n; i++) { + for (j = 0; j < 3; j++) + nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j, + calc_ref(priv, l, 0, j)); + + for (j = 0; j < 2; j++) + nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j, + calc_ref(priv, l, 1, j)); + } + } + + return 0; +} + +static int +nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv30_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv20_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv30_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv20_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv30_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x30), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv30_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv30_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c new file mode 100644 index 0000000..092f6f4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv35_fb_priv { + struct nouveau_fb base; +}; + +static void +nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, + struct nouveau_fb_tile *tile) +{ + u32 tiles = DIV_ROUND_UP(size, 0x40); + u32 tags = round_up(tiles / pfb->ram.parts, 0x40); + if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { + if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */ + else tile->zcomp |= 0x08000000; /* Z24S8 */ + tile->zcomp |= ((tile->tag->offset ) >> 6); + tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13; +#ifdef __BIG_ENDIAN + tile->zcomp |= 0x40000000; +#endif + } +} + +static int +nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv35_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv20_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv35_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv20_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv35_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x35), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv35_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv30_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c new file mode 100644 index 0000000..797ab3b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv36_fb_priv { + struct nouveau_fb base; +}; + +static void +nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, + struct nouveau_fb_tile *tile) +{ + u32 tiles = DIV_ROUND_UP(size, 0x40); + u32 tags = round_up(tiles / pfb->ram.parts, 0x40); + if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { + if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */ + else tile->zcomp |= 0x20000000; /* Z24S8 */ + tile->zcomp |= ((tile->tag->offset ) >> 6); + tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14; +#ifdef __BIG_ENDIAN + tile->zcomp |= 0x80000000; +#endif + } +} + +static int +nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv36_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv20_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv36_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv20_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv36_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x36), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv36_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv30_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c new file mode 100644 index 0000000..65e131b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv40_fb_priv { + struct nouveau_fb base; +}; + +static int +nv40_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 pbus1218 = nv_rd32(pfb, 0x001218); + switch (pbus1218 & 0x00000300) { + case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break; + case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break; + } + + pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; + pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; + return nv_rd32(pfb, 0x100320); +} + +void +nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, + struct nouveau_fb_tile *tile) +{ + u32 tiles = DIV_ROUND_UP(size, 0x80); + u32 tags = round_up(tiles / pfb->ram.parts, 0x100); + if ( (flags & 2) && + !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { + tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */ + tile->zcomp |= ((tile->tag->offset ) >> 8); + tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; +#ifdef __BIG_ENDIAN + tile->zcomp |= 0x40000000; +#endif + } +} + +static int +nv40_fb_init(struct nouveau_object *object) +{ + struct nv40_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + nv_mask(priv, 0x10033c, 0x00008000, 0x00000000); + return 0; +} + +static int +nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv40_fb_vram_init; + priv->base.tile.regions = 8; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv40_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv20_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nv40_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv40_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c new file mode 100644 index 0000000..e9e5a08 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/fb.h> + +struct nv41_fb_priv { + struct nouveau_fb base; +}; + +int +nv41_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 pfb474 = nv_rd32(pfb, 0x100474); + if (pfb474 & 0x00000004) + pfb->ram.type = NV_MEM_TYPE_GDDR3; + if (pfb474 & 0x00000002) + pfb->ram.type = NV_MEM_TYPE_DDR2; + if (pfb474 & 0x00000001) + pfb->ram.type = NV_MEM_TYPE_DDR1; + + pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; + pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; + return nv_rd32(pfb, 0x100320); +} + +void +nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) +{ + nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); + nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); + nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); + nv_rd32(pfb, 0x100600 + (i * 0x10)); + nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp); +} + +int +nv41_fb_init(struct nouveau_object *object) +{ + struct nv41_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x100800, 0x00000001); + return 0; +} + +static int +nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv41_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv41_fb_vram_init; + priv->base.tile.regions = 12; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv40_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv41_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nv41_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x41), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv41_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv41_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c new file mode 100644 index 0000000..ae89b50 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/fb.h> + +struct nv44_fb_priv { + struct nouveau_fb base; +}; + +int +nv44_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 pfb474 = nv_rd32(pfb, 0x100474); + if (pfb474 & 0x00000004) + pfb->ram.type = NV_MEM_TYPE_GDDR3; + if (pfb474 & 0x00000002) + pfb->ram.type = NV_MEM_TYPE_DDR2; + if (pfb474 & 0x00000001) + pfb->ram.type = NV_MEM_TYPE_DDR1; + + pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; + return 0; +} + +static void +nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, + u32 flags, struct nouveau_fb_tile *tile) +{ + tile->addr = 0x00000001; /* mode = vram */ + tile->addr |= addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; +} + +void +nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) +{ + nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); + nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); + nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); + nv_rd32(pfb, 0x100600 + (i * 0x10)); +} + +int +nv44_fb_init(struct nouveau_object *object) +{ + struct nv44_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x100850, 0x80000000); + nv_wr32(priv, 0x100800, 0x00000001); + return 0; +} + +static int +nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv44_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv44_fb_vram_init; + priv->base.tile.regions = 12; + priv->base.tile.init = nv44_fb_tile_init; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv44_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nv44_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x44), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv44_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv44_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c new file mode 100644 index 0000000..589b93e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <subdev/fb.h> + +struct nv46_fb_priv { + struct nouveau_fb base; +}; + +void +nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, + u32 flags, struct nouveau_fb_tile *tile) +{ + /* for performance, select alternate bank offset for zeta */ + if (!(flags & 4)) tile->addr = (0 << 3); + else tile->addr = (1 << 3); + + tile->addr |= 0x00000001; /* mode = vram */ + tile->addr |= addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; +} + +static int +nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv46_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv44_fb_vram_init; + priv->base.tile.regions = 15; + priv->base.tile.init = nv46_fb_tile_init; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv44_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nv46_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x46), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv46_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv44_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c new file mode 100644 index 0000000..818bba3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv47_fb_priv { + struct nouveau_fb base; +}; + +static int +nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv47_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv41_fb_vram_init; + priv->base.tile.regions = 15; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv40_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv41_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nv47_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x47), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv47_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv41_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c new file mode 100644 index 0000000..84a31af --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv49_fb_priv { + struct nouveau_fb base; +}; + +static int +nv49_fb_vram_init(struct nouveau_fb *pfb) +{ + u32 pfb914 = nv_rd32(pfb, 0x100914); + + switch (pfb914 & 0x00000003) { + case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break; + case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break; + case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; + case 0x00000003: break; + } + + pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; + pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; + return nv_rd32(pfb, 0x100320); +} + +static int +nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv49_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv49_fb_vram_init; + priv->base.tile.regions = 15; + priv->base.tile.init = nv30_fb_tile_init; + priv->base.tile.comp = nv40_fb_tile_comp; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv41_fb_tile_prog; + + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nv49_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x49), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv49_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv41_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c new file mode 100644 index 0000000..797fd55 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +struct nv4e_fb_priv { + struct nouveau_fb base; +}; + +static int +nv4e_fb_vram_init(struct nouveau_fb *pfb) +{ + pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; + pfb->ram.type = NV_MEM_TYPE_STOLEN; + return 0; +} + +static int +nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv4e_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nv04_fb_memtype_valid; + priv->base.ram.init = nv4e_fb_vram_init; + priv->base.tile.regions = 12; + priv->base.tile.init = nv46_fb_tile_init; + priv->base.tile.fini = nv20_fb_tile_fini; + priv->base.tile.prog = nv44_fb_tile_prog; + return nouveau_fb_preinit(&priv->base); +} + +struct nouveau_oclass +nv4e_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x4e), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv4e_fb_ctor, + .dtor = _nouveau_fb_dtor, + .init = nv44_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c new file mode 100644 index 0000000..0772ec9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c @@ -0,0 +1,522 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/client.h> +#include <core/enum.h> +#include <core/engctx.h> +#include <core/object.h> + +#include <subdev/fb.h> +#include <subdev/bios.h> + +struct nv50_fb_priv { + struct nouveau_fb base; + struct page *r100c08_page; + dma_addr_t r100c08; +}; + +static int types[0x80] = { + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 +}; + +static bool +nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype) +{ + return types[(memtype & 0xff00) >> 8] != 0; +} + +static u32 +nv50_fb_vram_rblock(struct nouveau_fb *pfb) +{ + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru, rblock_size; + + r0 = nv_rd32(pfb, 0x100200); + r4 = nv_rd32(pfb, 0x100204); + rt = nv_rd32(pfb, 0x100250); + ru = nv_rd32(pfb, 0x001540); + nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = 1 << (((r4 & 0x03000000) >> 24) + 2); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != pfb->ram.size) { + nv_warn(pfb, "memory controller reports %d MiB VRAM\n", + (u32)(pfb->ram.size >> 20)); + } + + rblock_size = rowsize; + if (rt & 1) + rblock_size *= 3; + + nv_debug(pfb, "rblock %d bytes\n", rblock_size); + return rblock_size; +} + +static int +nv50_fb_vram_init(struct nouveau_fb *pfb) +{ + struct nouveau_device *device = nv_device(pfb); + struct nouveau_bios *bios = nouveau_bios(device); + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 size, tags = 0; + int ret; + + pfb->ram.size = nv_rd32(pfb, 0x10020c); + pfb->ram.size = (pfb->ram.size & 0xffffff00) | + ((pfb->ram.size & 0x000000ff) << 32); + + size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; + switch (device->chipset) { + case 0xaa: + case 0xac: + case 0xaf: /* IGPs, no reordering, no real VRAM */ + ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1); + if (ret) + return ret; + + pfb->ram.type = NV_MEM_TYPE_STOLEN; + pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; + break; + default: + switch (nv_rd32(pfb, 0x100714) & 0x00000007) { + case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break; + case 1: + if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3) + pfb->ram.type = NV_MEM_TYPE_DDR3; + else + pfb->ram.type = NV_MEM_TYPE_DDR2; + break; + case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break; + case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break; + case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break; + default: + break; + } + + ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, + nv50_fb_vram_rblock(pfb) >> 12); + if (ret) + return ret; + + pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 
2 : 1; + tags = nv_rd32(pfb, 0x100320); + break; + } + + return tags; +} + +static int +nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, + u32 memtype, struct nouveau_mem **pmem) +{ + struct nv50_fb_priv *priv = (void *)pfb; + struct nouveau_mm *heap = &priv->base.vram; + struct nouveau_mm *tags = &priv->base.tags; + struct nouveau_mm_node *r; + struct nouveau_mem *mem; + int comp = (memtype & 0x300) >> 8; + int type = (memtype & 0x07f); + int back = (memtype & 0x800); + int min, max, ret; + + max = (size >> 12); + min = ncmin ? (ncmin >> 12) : max; + align >>= 12; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return -ENOMEM; + + mutex_lock(&pfb->base.mutex); + if (comp) { + if (align == 16) { + int n = (max >> 4) * comp; + + ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag); + if (ret) + mem->tag = NULL; + } + + if (unlikely(!mem->tag)) + comp = 0; + } + + INIT_LIST_HEAD(&mem->regions); + mem->memtype = (comp << 7) | type; + mem->size = max; + + type = types[type]; + do { + if (back) + ret = nouveau_mm_tail(heap, type, max, min, align, &r); + else + ret = nouveau_mm_head(heap, type, max, min, align, &r); + if (ret) { + mutex_unlock(&pfb->base.mutex); + pfb->ram.put(pfb, &mem); + return ret; + } + + list_add_tail(&r->rl_entry, &mem->regions); + max -= r->length; + } while (max); + mutex_unlock(&pfb->base.mutex); + + r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); + mem->offset = (u64)r->offset << 12; + *pmem = mem; + return 0; +} + +void +nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem) +{ + struct nv50_fb_priv *priv = (void *)pfb; + struct nouveau_mm_node *this; + struct nouveau_mem *mem; + + mem = *pmem; + *pmem = NULL; + if (unlikely(mem == NULL)) + return; + + mutex_lock(&pfb->base.mutex); + while (!list_empty(&mem->regions)) { + this = list_first_entry(&mem->regions, typeof(*this), rl_entry); + + list_del(&this->rl_entry); + nouveau_mm_free(&priv->base.vram, &this); + } + + nouveau_mm_free(&priv->base.tags, &mem->tag); + mutex_unlock(&pfb->base.mutex); + + kfree(mem); +} + +static const struct nouveau_enum vm_dispatch_subclients[] = { + { 0x00000000, "GRCTX", NULL }, + { 0x00000001, "NOTIFY", NULL }, + { 0x00000002, "QUERY", NULL }, + { 0x00000003, "COND", NULL }, + { 0x00000004, "M2M_IN", NULL }, + { 0x00000005, "M2M_OUT", NULL }, + { 0x00000006, "M2M_NOTIFY", NULL }, + {} +}; + +static const struct nouveau_enum vm_ccache_subclients[] = { + { 0x00000000, "CB", NULL }, + { 0x00000001, "TIC", NULL }, + { 0x00000002, "TSC", NULL }, + {} +}; + +static const struct nouveau_enum vm_prop_subclients[] = { + { 0x00000000, "RT0", NULL }, + { 0x00000001, "RT1", NULL }, + { 0x00000002, "RT2", NULL }, + { 0x00000003, "RT3", NULL }, + { 0x00000004, "RT4", NULL }, + { 0x00000005, "RT5", NULL }, + { 0x00000006, "RT6", NULL }, + { 0x00000007, "RT7", NULL }, + { 0x00000008, "ZETA", NULL }, + { 0x00000009, "LOCAL", NULL }, + { 0x0000000a, "GLOBAL", NULL }, + { 0x0000000b, "STACK", NULL }, + { 0x0000000c, "DST2D", NULL }, + {} +}; + +static const struct nouveau_enum vm_pfifo_subclients[] = { + { 0x00000000, "PUSHBUF", NULL }, + { 0x00000001, "SEMAPHORE", NULL }, + {} +}; + +static const struct nouveau_enum vm_bar_subclients[] = { + { 0x00000000, "FB", NULL }, + { 0x00000001, "IN", NULL }, + {} +}; + +static const struct nouveau_enum vm_client[] = { + { 0x00000000, "STRMOUT", NULL }, + { 0x00000003, "DISPATCH", vm_dispatch_subclients }, + { 0x00000004, "PFIFO_WRITE", NULL }, + { 0x00000005, "CCACHE", vm_ccache_subclients }, + 
{ 0x00000006, "PPPP", NULL }, + { 0x00000007, "CLIPID", NULL }, + { 0x00000008, "PFIFO_READ", NULL }, + { 0x00000009, "VFETCH", NULL }, + { 0x0000000a, "TEXTURE", NULL }, + { 0x0000000b, "PROP", vm_prop_subclients }, + { 0x0000000c, "PVP", NULL }, + { 0x0000000d, "PBSP", NULL }, + { 0x0000000e, "PCRYPT", NULL }, + { 0x0000000f, "PCOUNTER", NULL }, + { 0x00000011, "PDAEMON", NULL }, + {} +}; + +static const struct nouveau_enum vm_engine[] = { + { 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR }, + { 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP }, + { 0x00000004, "PEEPHOLE", NULL }, + { 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO }, + { 0x00000006, "BAR", vm_bar_subclients }, + { 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP }, + { 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG }, + { 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP }, + { 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT }, + { 0x0000000b, "PCOUNTER", NULL }, + { 0x0000000c, "SEMAPHORE_BG", NULL }, + { 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 }, + { 0x0000000e, "PDAEMON", NULL }, + {} +}; + +static const struct nouveau_enum vm_fault[] = { + { 0x00000000, "PT_NOT_PRESENT", NULL }, + { 0x00000001, "PT_TOO_SHORT", NULL }, + { 0x00000002, "PAGE_NOT_PRESENT", NULL }, + { 0x00000003, "PAGE_SYSTEM_ONLY", NULL }, + { 0x00000004, "PAGE_READ_ONLY", NULL }, + { 0x00000006, "NULL_DMAOBJ", NULL }, + { 0x00000007, "WRONG_MEMTYPE", NULL }, + { 0x0000000b, "VRAM_LIMIT", NULL }, + { 0x0000000f, "DMAOBJ_LIMIT", NULL }, + {} +}; + +static void +nv50_fb_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_device *device = nv_device(subdev); + struct nouveau_engine *engine; + struct nv50_fb_priv *priv = (void *)subdev; + const struct nouveau_enum *en, *cl; + struct nouveau_object *engctx = NULL; + u32 trap[6], idx, chan; + u8 st0, st1, st2, st3; + int i; + + idx = nv_rd32(priv, 0x100c90); + if (!(idx & 0x80000000)) + return; + idx &= 0x00ffffff; + + for (i = 0; i < 6; i++) { + nv_wr32(priv, 0x100c90, idx | i << 24); + trap[i] = nv_rd32(priv, 0x100c94); + } + nv_wr32(priv, 0x100c90, idx | 0x80000000); + + /* decode status bits into something more useful */ + if (device->chipset < 0xa3 || + device->chipset == 0xaa || device->chipset == 0xac) { + st0 = (trap[0] & 0x0000000f) >> 0; + st1 = (trap[0] & 0x000000f0) >> 4; + st2 = (trap[0] & 0x00000f00) >> 8; + st3 = (trap[0] & 0x0000f000) >> 12; + } else { + st0 = (trap[0] & 0x000000ff) >> 0; + st1 = (trap[0] & 0x0000ff00) >> 8; + st2 = (trap[0] & 0x00ff0000) >> 16; + st3 = (trap[0] & 0xff000000) >> 24; + } + chan = (trap[2] << 16) | trap[1]; + + en = nouveau_enum_find(vm_engine, st0); + + if (en && en->data2) { + const struct nouveau_enum *orig_en = en; + while (en->name && en->value == st0 && en->data2) { + engine = nouveau_engine(subdev, en->data2); + if (engine) { + engctx = nouveau_engctx_get(engine, chan); + if (engctx) + break; + } + en++; + } + if (!engctx) + en = orig_en; + } + + nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ", + (trap[5] & 0x00000100) ? 
"read" : "write", + trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan, + nouveau_client_name(engctx)); + + nouveau_engctx_put(engctx); + + if (en) + pr_cont("%s/", en->name); + else + pr_cont("%02x/", st0); + + cl = nouveau_enum_find(vm_client, st2); + if (cl) + pr_cont("%s/", cl->name); + else + pr_cont("%02x/", st2); + + if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3); + else if (en && en->data) cl = nouveau_enum_find(en->data, st3); + else cl = NULL; + if (cl) + pr_cont("%s", cl->name); + else + pr_cont("%02x", st3); + + pr_cont(" reason: "); + en = nouveau_enum_find(vm_fault, st1); + if (en) + pr_cont("%s\n", en->name); + else + pr_cont("0x%08x\n", st1); +} + +static int +nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nv50_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (priv->r100c08_page) { + priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page, + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(device->pdev, priv->r100c08)) + nv_warn(priv, "failed 0x100c08 page map\n"); + } else { + nv_warn(priv, "failed 0x100c08 page alloc\n"); + } + + priv->base.memtype_valid = nv50_fb_memtype_valid; + priv->base.ram.init = nv50_fb_vram_init; + priv->base.ram.get = nv50_fb_vram_new; + priv->base.ram.put = nv50_fb_vram_del; + nv_subdev(priv)->intr = nv50_fb_intr; + return nouveau_fb_preinit(&priv->base); +} + +static void +nv50_fb_dtor(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv50_fb_priv *priv = (void *)object; + + if (priv->r100c08_page) { + pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c08_page); + } + + nouveau_fb_destroy(&priv->base); +} + +static int +nv50_fb_init(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv50_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + /* Not a clue what this is exactly. Without pointing it at a + * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) + * cause IOMMU "read from address 0" errors (rh#561267) + */ + nv_wr32(priv, 0x100c08, priv->r100c08 >> 8); + + /* This is needed to get meaningful information from 100c90 + * on traps. No idea what these values mean exactly. */ + switch (device->chipset) { + case 0x50: + nv_wr32(priv, 0x100c90, 0x000707ff); + break; + case 0xa3: + case 0xa5: + case 0xa8: + nv_wr32(priv, 0x100c90, 0x000d0fff); + break; + case 0xaf: + nv_wr32(priv, 0x100c90, 0x089d1fff); + break; + default: + nv_wr32(priv, 0x100c90, 0x001d07ff); + break; + } + + return 0; +} + +struct nouveau_oclass +nv50_fb_oclass = { + .handle = NV_SUBDEV(FB, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_fb_ctor, + .dtor = nv50_fb_dtor, + .init = nv50_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c new file mode 100644 index 0000000..86ad592 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c @@ -0,0 +1,247 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/fb.h> +#include <subdev/ltcg.h> +#include <subdev/bios.h> + +struct nvc0_fb_priv { + struct nouveau_fb base; + struct page *r100c10_page; + dma_addr_t r100c10; +}; + +extern const u8 nvc0_pte_storage_type_map[256]; + + +static bool +nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags) +{ + u8 memtype = (tile_flags & 0x0000ff00) >> 8; + return likely((nvc0_pte_storage_type_map[memtype] != 0xff)); +} + +static int +nvc0_fb_vram_init(struct nouveau_fb *pfb) +{ + struct nouveau_bios *bios = nouveau_bios(pfb); + const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ + const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 parts = nv_rd32(pfb, 0x022438); + u32 pmask = nv_rd32(pfb, 0x022554); + u32 bsize = nv_rd32(pfb, 0x10f20c); + u32 offset, length; + bool uniform = true; + int ret, part; + + nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800)); + nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask); + + pfb->ram.type = nouveau_fb_bios_memtype(bios); + pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 
2 : 1; + + /* read amount of vram attached to each memory controller */ + for (part = 0; part < parts; part++) { + if (!(pmask & (1 << part))) { + u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000)); + if (psize != bsize) { + if (psize < bsize) + bsize = psize; + uniform = false; + } + + nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize); + pfb->ram.size += (u64)psize << 20; + } + } + + /* if all controllers have the same amount attached, there's no holes */ + if (uniform) { + offset = rsvd_head; + length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail; + return nouveau_mm_init(&pfb->vram, offset, length, 1); + } + + /* otherwise, address lowest common amount from 0GiB */ + ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1); + if (ret) + return ret; + + /* and the rest starting from (8GiB + common_size) */ + offset = (0x0200000000ULL >> 12) + (bsize << 8); + length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail; + + ret = nouveau_mm_init(&pfb->vram, offset, length, 0); + if (ret) { + nouveau_mm_fini(&pfb->vram); + return ret; + } + + return 0; +} + +static int +nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, + u32 memtype, struct nouveau_mem **pmem) +{ + struct nouveau_mm *mm = &pfb->vram; + struct nouveau_mm_node *r; + struct nouveau_mem *mem; + int type = (memtype & 0x0ff); + int back = (memtype & 0x800); + int ret; + const bool comp = nvc0_pte_storage_type_map[type] != type; + + size >>= 12; + align >>= 12; + ncmin >>= 12; + if (!ncmin) + ncmin = size; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return -ENOMEM; + + INIT_LIST_HEAD(&mem->regions); + mem->size = size; + + mutex_lock(&pfb->base.mutex); + if (comp) { + struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent); + + /* compression only works with lpages */ + if (align == (1 << (17 - 12))) { + int n = size >> 5; + ltcg->tags_alloc(ltcg, n, &mem->tag); + } + if (unlikely(!mem->tag)) + type = nvc0_pte_storage_type_map[type]; + } + mem->memtype = type; + + do { + if (back) + ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); + else + ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); + if (ret) { + mutex_unlock(&pfb->base.mutex); + pfb->ram.put(pfb, &mem); + return ret; + } + + list_add_tail(&r->rl_entry, &mem->regions); + size -= r->length; + } while (size); + mutex_unlock(&pfb->base.mutex); + + r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); + mem->offset = (u64)r->offset << 12; + *pmem = mem; + return 0; +} + +static void +nvc0_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem) +{ + struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent); + + if ((*pmem)->tag) + ltcg->tags_free(ltcg, &(*pmem)->tag); + + nv50_fb_vram_del(pfb, pmem); +} + +static int +nvc0_fb_init(struct nouveau_object *object) +{ + struct nvc0_fb_priv *priv = (void *)object; + int ret; + + ret = nouveau_fb_init(&priv->base); + if (ret) + return ret; + + if (priv->r100c10_page) + nv_wr32(priv, 0x100c10, priv->r100c10 >> 8); + return 0; +} + +static void +nvc0_fb_dtor(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nvc0_fb_priv *priv = (void *)object; + + if (priv->r100c10_page) { + pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c10_page); + } + + nouveau_fb_destroy(&priv->base); +} + +static int +nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct 
nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nvc0_fb_priv *priv; + int ret; + + ret = nouveau_fb_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.memtype_valid = nvc0_fb_memtype_valid; + priv->base.ram.init = nvc0_fb_vram_init; + priv->base.ram.get = nvc0_fb_vram_new; + priv->base.ram.put = nvc0_fb_vram_del; + + priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (priv->r100c10_page) { + priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(device->pdev, priv->r100c10)) + return -EFAULT; + } + + return nouveau_fb_preinit(&priv->base); +} + + +struct nouveau_oclass +nvc0_fb_oclass = { + .handle = NV_SUBDEV(FB, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_fb_ctor, + .dtor = nvc0_fb_dtor, + .init = nvc0_fb_init, + .fini = _nouveau_fb_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c new file mode 100644 index 0000000..d422acc --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c @@ -0,0 +1,158 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/gpio.h> +#include <subdev/bios.h> +#include <subdev/bios/gpio.h> + +static int +nouveau_gpio_drive(struct nouveau_gpio *gpio, + int idx, int line, int dir, int out) +{ + return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV; +} + +static int +nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line) +{ + return gpio->sense ? 
gpio->sense(gpio, line) : -ENODEV; +} + +static int +nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, + struct dcb_gpio_func *func) +{ + struct nouveau_bios *bios = nouveau_bios(gpio); + u8 ver, len; + u16 data; + + if (line == 0xff && tag == 0xff) + return -EINVAL; + + data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func); + if (data) + return 0; + + /* Apple iMac G4 NV18 */ + if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) { + if (tag == DCB_GPIO_TVDAC0) { + *func = (struct dcb_gpio_func) { + .func = DCB_GPIO_TVDAC0, + .line = 4, + .log[0] = 0, + .log[1] = 1, + }; + return 0; + } + } + + return -EINVAL; +} + +static int +nouveau_gpio_set(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, int state) +{ + struct dcb_gpio_func func; + int ret; + + ret = nouveau_gpio_find(gpio, idx, tag, line, &func); + if (ret == 0) { + int dir = !!(func.log[state] & 0x02); + int out = !!(func.log[state] & 0x01); + ret = nouveau_gpio_drive(gpio, idx, func.line, dir, out); + } + + return ret; +} + +static int +nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line) +{ + struct dcb_gpio_func func; + int ret; + + ret = nouveau_gpio_find(gpio, idx, tag, line, &func); + if (ret == 0) { + ret = nouveau_gpio_sense(gpio, idx, func.line); + if (ret >= 0) + ret = (ret == (func.log[1] & 1)); + } + + return ret; +} + +void +_nouveau_gpio_dtor(struct nouveau_object *object) +{ + struct nouveau_gpio *gpio = (void *)object; + nouveau_event_destroy(&gpio->events); + nouveau_subdev_destroy(&gpio->base); +} + +int +nouveau_gpio_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int lines, + int length, void **pobject) +{ + struct nouveau_gpio *gpio; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "GPIO", "gpio", + length, pobject); + gpio = *pobject; + if (ret) + return ret; + + ret = nouveau_event_create(lines, &gpio->events); + if (ret) + return ret; + + gpio->find = nouveau_gpio_find; + gpio->set = nouveau_gpio_set; + gpio->get = nouveau_gpio_get; + return 0; +} + +static struct dmi_system_id gpio_reset_ids[] = { + { + .ident = "Apple Macbook 10,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"), + } + }, + { } +}; + +int +nouveau_gpio_init(struct nouveau_gpio *gpio) +{ + int ret = nouveau_subdev_init(&gpio->base); + if (ret == 0 && gpio->reset) { + if (dmi_check_system(gpio_reset_ids)) + gpio->reset(gpio, DCB_GPIO_UNUSED); + } + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c new file mode 100644 index 0000000..76d5d54 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "priv.h" + +struct nv10_gpio_priv { + struct nouveau_gpio base; +}; + +static int +nv10_gpio_sense(struct nouveau_gpio *gpio, int line) +{ + if (line < 2) { + line = line * 16; + line = nv_rd32(gpio, 0x600818) >> line; + return !!(line & 0x0100); + } else + if (line < 10) { + line = (line - 2) * 4; + line = nv_rd32(gpio, 0x60081c) >> line; + return !!(line & 0x04); + } else + if (line < 14) { + line = (line - 10) * 4; + line = nv_rd32(gpio, 0x600850) >> line; + return !!(line & 0x04); + } + + return -EINVAL; +} + +static int +nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out) +{ + u32 reg, mask, data; + + if (line < 2) { + line = line * 16; + reg = 0x600818; + mask = 0x00000011; + data = (dir << 4) | out; + } else + if (line < 10) { + line = (line - 2) * 4; + reg = 0x60081c; + mask = 0x00000003; + data = (dir << 1) | out; + } else + if (line < 14) { + line = (line - 10) * 4; + reg = 0x600850; + mask = 0x00000003; + data = (dir << 1) | out; + } else { + return -EINVAL; + } + + nv_mask(gpio, reg, mask << line, data << line); + return 0; +} + +static void +nv10_gpio_intr(struct nouveau_subdev *subdev) +{ + struct nv10_gpio_priv *priv = (void *)subdev; + u32 intr = nv_rd32(priv, 0x001104); + u32 hi = (intr & 0x0000ffff) >> 0; + u32 lo = (intr & 0xffff0000) >> 16; + int i; + + for (i = 0; (hi | lo) && i < 32; i++) { + if ((hi | lo) & (1 << i)) + nouveau_event_trigger(priv->base.events, i); + } + + nv_wr32(priv, 0x001104, intr); +} + +static void +nv10_gpio_intr_enable(struct nouveau_event *event, int line) +{ + nv_wr32(event->priv, 0x001104, 0x00010001 << line); + nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line); +} + +static void +nv10_gpio_intr_disable(struct nouveau_event *event, int line) +{ + nv_wr32(event->priv, 0x001104, 0x00010001 << line); + nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000); +} + +static int +nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv10_gpio_priv *priv; + int ret; + + ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.drive = nv10_gpio_drive; + priv->base.sense = nv10_gpio_sense; + priv->base.events->priv = priv; + priv->base.events->enable = nv10_gpio_intr_enable; + priv->base.events->disable = nv10_gpio_intr_disable; + nv_subdev(priv)->intr = nv10_gpio_intr; + return 0; +} + +static void +nv10_gpio_dtor(struct nouveau_object *object) +{ + struct nv10_gpio_priv *priv = (void *)object; + nouveau_gpio_destroy(&priv->base); +} + +static int +nv10_gpio_init(struct nouveau_object *object) +{ + struct nv10_gpio_priv *priv = (void *)object; + int ret; + + ret = nouveau_gpio_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x001144, 0x00000000); + nv_wr32(priv, 0x001104, 0xffffffff); + return 0; +} + +static int +nv10_gpio_fini(struct nouveau_object *object, bool suspend) +{ + 
struct nv10_gpio_priv *priv = (void *)object; + nv_wr32(priv, 0x001144, 0x00000000); + return nouveau_gpio_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv10_gpio_oclass = { + .handle = NV_SUBDEV(GPIO, 0x10), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv10_gpio_ctor, + .dtor = nv10_gpio_dtor, + .init = nv10_gpio_init, + .fini = nv10_gpio_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c new file mode 100644 index 0000000..bf489dc --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c @@ -0,0 +1,212 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +struct nv50_gpio_priv { + struct nouveau_gpio base; +}; + +static void +nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match) +{ + struct nouveau_bios *bios = nouveau_bios(gpio); + struct nv50_gpio_priv *priv = (void *)gpio; + u8 ver, len; + u16 entry; + int ent = -1; + + while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) { + static const u32 regs[] = { 0xe100, 0xe28c }; + u32 data = nv_ro32(bios, entry); + u8 line = (data & 0x0000001f); + u8 func = (data & 0x0000ff00) >> 8; + u8 defs = !!(data & 0x01000000); + u8 unk0 = !!(data & 0x02000000); + u8 unk1 = !!(data & 0x04000000); + u32 val = (unk1 << 16) | unk0; + u32 reg = regs[line >> 4]; line &= 0x0f; + + if ( func == DCB_GPIO_UNUSED || + (match != DCB_GPIO_UNUSED && match != func)) + continue; + + gpio->set(gpio, 0, func, line, defs); + + nv_mask(priv, reg, 0x00010001 << line, val << line); + } +} + +static int +nv50_gpio_location(int line, u32 *reg, u32 *shift) +{ + const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; + + if (line >= 32) + return -EINVAL; + + *reg = nv50_gpio_reg[line >> 3]; + *shift = (line & 7) << 2; + return 0; +} + +static int +nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out) +{ + u32 reg, shift; + + if (nv50_gpio_location(line, &reg, &shift)) + return -EINVAL; + + nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift); + return 0; +} + +static int +nv50_gpio_sense(struct nouveau_gpio *gpio, int line) +{ + u32 reg, shift; + + if (nv50_gpio_location(line, &reg, &shift)) + return -EINVAL; + + return !!(nv_rd32(gpio, reg) & (4 << shift)); +} + +void +nv50_gpio_intr(struct nouveau_subdev *subdev) +{ + struct nv50_gpio_priv *priv = (void *)subdev; + u32 intr0, intr1 = 0; + u32 hi, lo; + int i; + + intr0 = 
nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); + if (nv_device(priv)->chipset >= 0x90) + intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); + + hi = (intr0 & 0x0000ffff) | (intr1 << 16); + lo = (intr0 >> 16) | (intr1 & 0xffff0000); + + for (i = 0; (hi | lo) && i < 32; i++) { + if ((hi | lo) & (1 << i)) + nouveau_event_trigger(priv->base.events, i); + } + + nv_wr32(priv, 0xe054, intr0); + if (nv_device(priv)->chipset >= 0x90) + nv_wr32(priv, 0xe074, intr1); +} + +void +nv50_gpio_intr_enable(struct nouveau_event *event, int line) +{ + const u32 addr = line < 16 ? 0xe050 : 0xe070; + const u32 mask = 0x00010001 << (line & 0xf); + nv_wr32(event->priv, addr + 0x04, mask); + nv_mask(event->priv, addr + 0x00, mask, mask); +} + +void +nv50_gpio_intr_disable(struct nouveau_event *event, int line) +{ + const u32 addr = line < 16 ? 0xe050 : 0xe070; + const u32 mask = 0x00010001 << (line & 0xf); + nv_wr32(event->priv, addr + 0x04, mask); + nv_mask(event->priv, addr + 0x00, mask, 0x00000000); +} + +static int +nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_gpio_priv *priv; + int ret; + + ret = nouveau_gpio_create(parent, engine, oclass, + nv_device(parent)->chipset >= 0x90 ? 32 : 16, + &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.reset = nv50_gpio_reset; + priv->base.drive = nv50_gpio_drive; + priv->base.sense = nv50_gpio_sense; + priv->base.events->priv = priv; + priv->base.events->enable = nv50_gpio_intr_enable; + priv->base.events->disable = nv50_gpio_intr_disable; + nv_subdev(priv)->intr = nv50_gpio_intr; + return 0; +} + +void +nv50_gpio_dtor(struct nouveau_object *object) +{ + struct nv50_gpio_priv *priv = (void *)object; + nouveau_gpio_destroy(&priv->base); +} + +int +nv50_gpio_init(struct nouveau_object *object) +{ + struct nv50_gpio_priv *priv = (void *)object; + int ret; + + ret = nouveau_gpio_init(&priv->base); + if (ret) + return ret; + + /* disable, and ack any pending gpio interrupts */ + nv_wr32(priv, 0xe050, 0x00000000); + nv_wr32(priv, 0xe054, 0xffffffff); + if (nv_device(priv)->chipset >= 0x90) { + nv_wr32(priv, 0xe070, 0x00000000); + nv_wr32(priv, 0xe074, 0xffffffff); + } + + return 0; +} + +int +nv50_gpio_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_gpio_priv *priv = (void *)object; + nv_wr32(priv, 0xe050, 0x00000000); + if (nv_device(priv)->chipset >= 0x90) + nv_wr32(priv, 0xe070, 0x00000000); + return nouveau_gpio_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv50_gpio_oclass = { + .handle = NV_SUBDEV(GPIO, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_gpio_ctor, + .dtor = nv50_gpio_dtor, + .init = nv50_gpio_init, + .fini = nv50_gpio_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c new file mode 100644 index 0000000..010431e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c @@ -0,0 +1,107 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +struct nvd0_gpio_priv { + struct nouveau_gpio base; +}; + +void +nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match) +{ + struct nouveau_bios *bios = nouveau_bios(gpio); + struct nvd0_gpio_priv *priv = (void *)gpio; + u8 ver, len; + u16 entry; + int ent = -1; + + while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) { + u32 data = nv_ro32(bios, entry); + u8 line = (data & 0x0000003f); + u8 defs = !!(data & 0x00000080); + u8 func = (data & 0x0000ff00) >> 8; + u8 unk0 = (data & 0x00ff0000) >> 16; + u8 unk1 = (data & 0x1f000000) >> 24; + + if ( func == DCB_GPIO_UNUSED || + (match != DCB_GPIO_UNUSED && match != func)) + continue; + + gpio->set(gpio, 0, func, line, defs); + + nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0); + if (unk1--) + nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line); + } +} + +int +nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out) +{ + u32 data = ((dir ^ 1) << 13) | (out << 12); + nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data); + nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */ + return 0; +} + +int +nvd0_gpio_sense(struct nouveau_gpio *gpio, int line) +{ + return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000); +} + +static int +nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvd0_gpio_priv *priv; + int ret; + + ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.reset = nvd0_gpio_reset; + priv->base.drive = nvd0_gpio_drive; + priv->base.sense = nvd0_gpio_sense; + priv->base.events->priv = priv; + priv->base.events->enable = nv50_gpio_intr_enable; + priv->base.events->disable = nv50_gpio_intr_disable; + nv_subdev(priv)->intr = nv50_gpio_intr; + return 0; +} + +struct nouveau_oclass +nvd0_gpio_oclass = { + .handle = NV_SUBDEV(GPIO, 0xd0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvd0_gpio_ctor, + .dtor = nv50_gpio_dtor, + .init = nv50_gpio_init, + .fini = nv50_gpio_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c new file mode 100644 index 0000000..16b8c5b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c @@ -0,0 +1,131 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +struct nve0_gpio_priv { + struct nouveau_gpio base; +}; + +void +nve0_gpio_intr(struct nouveau_subdev *subdev) +{ + struct nve0_gpio_priv *priv = (void *)subdev; + u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08); + u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88); + u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16); + u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000); + int i; + + for (i = 0; (hi | lo) && i < 32; i++) { + if ((hi | lo) & (1 << i)) + nouveau_event_trigger(priv->base.events, i); + } + + nv_wr32(priv, 0xdc00, intr0); + nv_wr32(priv, 0xdc88, intr1); +} + +void +nve0_gpio_intr_enable(struct nouveau_event *event, int line) +{ + const u32 addr = line < 16 ? 0xdc00 : 0xdc80; + const u32 mask = 0x00010001 << (line & 0xf); + nv_wr32(event->priv, addr + 0x08, mask); + nv_mask(event->priv, addr + 0x00, mask, mask); +} + +void +nve0_gpio_intr_disable(struct nouveau_event *event, int line) +{ + const u32 addr = line < 16 ? 
0xdc00 : 0xdc80; + const u32 mask = 0x00010001 << (line & 0xf); + nv_wr32(event->priv, addr + 0x08, mask); + nv_mask(event->priv, addr + 0x00, mask, 0x00000000); +} + +int +nve0_gpio_fini(struct nouveau_object *object, bool suspend) +{ + struct nve0_gpio_priv *priv = (void *)object; + nv_wr32(priv, 0xdc08, 0x00000000); + nv_wr32(priv, 0xdc88, 0x00000000); + return nouveau_gpio_fini(&priv->base, suspend); +} + +int +nve0_gpio_init(struct nouveau_object *object) +{ + struct nve0_gpio_priv *priv = (void *)object; + int ret; + + ret = nouveau_gpio_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0xdc00, 0xffffffff); + nv_wr32(priv, 0xdc80, 0xffffffff); + return 0; +} + +void +nve0_gpio_dtor(struct nouveau_object *object) +{ + struct nve0_gpio_priv *priv = (void *)object; + nouveau_gpio_destroy(&priv->base); +} + +static int +nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_gpio_priv *priv; + int ret; + + ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.reset = nvd0_gpio_reset; + priv->base.drive = nvd0_gpio_drive; + priv->base.sense = nvd0_gpio_sense; + priv->base.events->priv = priv; + priv->base.events->enable = nve0_gpio_intr_enable; + priv->base.events->disable = nve0_gpio_intr_disable; + nv_subdev(priv)->intr = nve0_gpio_intr; + return 0; +} + +struct nouveau_oclass +nve0_gpio_oclass = { + .handle = NV_SUBDEV(GPIO, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_gpio_ctor, + .dtor = nv50_gpio_dtor, + .init = nve0_gpio_init, + .fini = nve0_gpio_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h new file mode 100644 index 0000000..2ee1c89 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h @@ -0,0 +1,17 @@ +#ifndef __NVKM_GPIO_H__ +#define __NVKM_GPIO_H__ + +#include <subdev/gpio.h> + +void nv50_gpio_dtor(struct nouveau_object *); +int nv50_gpio_init(struct nouveau_object *); +int nv50_gpio_fini(struct nouveau_object *, bool); +void nv50_gpio_intr(struct nouveau_subdev *); +void nv50_gpio_intr_enable(struct nouveau_event *, int line); +void nv50_gpio_intr_disable(struct nouveau_event *, int line); + +void nvd0_gpio_reset(struct nouveau_gpio *, u8); +int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int); +int nvd0_gpio_sense(struct nouveau_gpio *, int); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c new file mode 100644 index 0000000..dec94e9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c @@ -0,0 +1,279 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <subdev/i2c.h> + +struct anx9805_i2c_port { + struct nouveau_i2c_port base; + u32 addr; + u32 ctrl; +}; + +static int +anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh) +{ + struct anx9805_i2c_port *chan = (void *)port; + struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent; + u8 tmp, i; + + nv_wri2cr(mast, chan->addr, 0xa0, link_bw); + nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00)); + nv_wri2cr(mast, chan->addr, 0xa2, 0x01); + nv_wri2cr(mast, chan->addr, 0xa8, 0x01); + + i = 0; + while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) { + mdelay(5); + if (i++ == 100) { + nv_error(port, "link training timed out\n"); + return -ETIMEDOUT; + } + } + + if (tmp & 0x70) { + nv_error(port, "link training failed: 0x%02x\n", tmp); + return -EIO; + } + + return 1; +} + +static int +anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size) +{ + struct anx9805_i2c_port *chan = (void *)port; + struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent; + int i, ret = -ETIMEDOUT; + u8 tmp; + + tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04; + nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04); + nv_wri2cr(mast, chan->ctrl, 0x07, tmp); + nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01); + + nv_wri2cr(mast, chan->addr, 0xe4, 0x80); + for (i = 0; !(type & 1) && i < size; i++) + nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]); + nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type); + nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0); + nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8); + nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16); + nv_wri2cr(mast, chan->addr, 0xe9, 0x01); + + i = 0; + while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) { + mdelay(5); + if (i++ == 32) + goto done; + } + + if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) { + ret = -EIO; + goto done; + } + + for (i = 0; (type & 1) && i < size; i++) + data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i); + ret = 0; +done: + nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01); + return ret; +} + +static const struct nouveau_i2c_func +anx9805_aux_func = { + .aux = anx9805_aux, + .lnk_ctl = anx9805_train, +}; + +static int +anx9805_aux_chan_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct nouveau_i2c_port *mast = (void *)parent; + struct anx9805_i2c_port *chan; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &nouveau_i2c_aux_algo, &chan); + *pobject = nv_object(chan); + if (ret) + return ret; + + switch ((oclass->handle & 0xff00) >> 8) { + case 0x0d: + chan->addr = 0x38; + chan->ctrl = 0x39; + break; + case 0x0e: + chan->addr = 0x3c; + chan->ctrl = 0x3b; + break; + default: + BUG_ON(1); + } + + if (mast->adapter.algo == &i2c_bit_algo) { + struct i2c_algo_bit_data *algo = mast->adapter.algo_data; + algo->udelay = max(algo->udelay, 40); + } + + 
chan->base.func = &anx9805_aux_func; + return 0; +} + +static struct nouveau_ofuncs +anx9805_aux_ofuncs = { + .ctor = anx9805_aux_chan_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = _nouveau_i2c_port_init, + .fini = _nouveau_i2c_port_fini, +}; + +static int +anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct anx9805_i2c_port *port = adap->algo_data; + struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent; + struct i2c_msg *msg = msgs; + int ret = -ETIMEDOUT; + int i, j, cnt = num; + u8 seg = 0x00, off = 0x00, tmp; + + tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10; + nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10); + nv_wri2cr(mast, port->ctrl, 0x07, tmp); + nv_wri2cr(mast, port->addr, 0x43, 0x05); + mdelay(5); + + while (cnt--) { + if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) { + nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1); + nv_wri2cr(mast, port->addr, 0x41, seg); + nv_wri2cr(mast, port->addr, 0x42, off); + nv_wri2cr(mast, port->addr, 0x44, msg->len); + nv_wri2cr(mast, port->addr, 0x45, 0x00); + nv_wri2cr(mast, port->addr, 0x43, 0x01); + for (i = 0; i < msg->len; i++) { + j = 0; + while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) { + mdelay(5); + if (j++ == 32) + goto done; + } + msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47); + } + } else + if (!(msg->flags & I2C_M_RD)) { + if (msg->addr == 0x50 && msg->len == 0x01) { + off = msg->buf[0]; + } else + if (msg->addr == 0x30 && msg->len == 0x01) { + seg = msg->buf[0]; + } else + goto done; + } else { + goto done; + } + msg++; + } + + ret = num; +done: + nv_wri2cr(mast, port->addr, 0x43, 0x00); + return ret; +} + +static u32 +anx9805_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm +anx9805_i2c_algo = { + .master_xfer = anx9805_xfer, + .functionality = anx9805_func +}; + +static const struct nouveau_i2c_func +anx9805_i2c_func = { +}; + +static int +anx9805_ddc_port_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct nouveau_i2c_port *mast = (void *)parent; + struct anx9805_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &anx9805_i2c_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + switch ((oclass->handle & 0xff00) >> 8) { + case 0x0d: + port->addr = 0x3d; + port->ctrl = 0x39; + break; + case 0x0e: + port->addr = 0x3f; + port->ctrl = 0x3b; + break; + default: + BUG_ON(1); + } + + if (mast->adapter.algo == &i2c_bit_algo) { + struct i2c_algo_bit_data *algo = mast->adapter.algo_data; + algo->udelay = max(algo->udelay, 40); + } + + port->base.func = &anx9805_i2c_func; + return 0; +} + +static struct nouveau_ofuncs +anx9805_ddc_ofuncs = { + .ctor = anx9805_ddc_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = _nouveau_i2c_port_init, + .fini = _nouveau_i2c_port_fini, +}; + +struct nouveau_oclass +nouveau_anx9805_sclass[] = { + { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs }, + { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs }, + { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs }, + { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs }, + {} +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c new file mode 100644 index 0000000..5de074a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c 
@@ -0,0 +1,100 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/i2c.h> + +int +nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size) +{ + if (port->func->aux) { + if (port->func->acquire) + port->func->acquire(port); + return port->func->aux(port, 9, addr, data, size); + } + return -ENODEV; +} + +int +nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size) +{ + if (port->func->aux) { + if (port->func->acquire) + port->func->acquire(port); + return port->func->aux(port, 8, addr, data, size); + } + return -ENODEV; +} + +static int +aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct nouveau_i2c_port *port = adap->algo_data; + struct i2c_msg *msg = msgs; + int ret, mcnt = num; + + if (!port->func->aux) + return -ENODEV; + if ( port->func->acquire) + port->func->acquire(port); + + while (mcnt--) { + u8 remaining = msg->len; + u8 *ptr = msg->buf; + + while (remaining) { + u8 cnt = (remaining > 16) ? 16 : remaining; + u8 cmd; + + if (msg->flags & I2C_M_RD) + cmd = 1; + else + cmd = 0; + + if (mcnt || remaining > 16) + cmd |= 4; /* MOT */ + + ret = port->func->aux(port, cmd, msg->addr, ptr, cnt); + if (ret < 0) + return ret; + + ptr += cnt; + remaining -= cnt; + } + + msg++; + } + + return num; +} + +static u32 +aux_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +const struct i2c_algorithm nouveau_i2c_aux_algo = { + .master_xfer = aux_xfer, + .functionality = aux_func +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c new file mode 100644 index 0000000..8ae2625 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c @@ -0,0 +1,365 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/option.h> + +#include <subdev/bios.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/i2c.h> +#include <subdev/i2c.h> +#include <subdev/vga.h> + +/****************************************************************************** + * interface to linux i2c bit-banging algorithm + *****************************************************************************/ + +#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT +#define CSTMSEL true +#else +#define CSTMSEL false +#endif + +static int +nouveau_i2c_pre_xfer(struct i2c_adapter *adap) +{ + struct i2c_algo_bit_data *bit = adap->algo_data; + struct nouveau_i2c_port *port = bit->data; + if (port->func->acquire) + port->func->acquire(port); + return 0; +} + +static void +nouveau_i2c_setscl(void *data, int state) +{ + struct nouveau_i2c_port *port = data; + port->func->drive_scl(port, state); +} + +static void +nouveau_i2c_setsda(void *data, int state) +{ + struct nouveau_i2c_port *port = data; + port->func->drive_sda(port, state); +} + +static int +nouveau_i2c_getscl(void *data) +{ + struct nouveau_i2c_port *port = data; + return port->func->sense_scl(port); +} + +static int +nouveau_i2c_getsda(void *data) +{ + struct nouveau_i2c_port *port = data; + return port->func->sense_sda(port); +} + +/****************************************************************************** + * base i2c "port" class implementation + *****************************************************************************/ + +void +_nouveau_i2c_port_dtor(struct nouveau_object *object) +{ + struct nouveau_i2c_port *port = (void *)object; + i2c_del_adapter(&port->adapter); + nouveau_object_destroy(&port->base); +} + +int +nouveau_i2c_port_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, u8 index, + const struct i2c_algorithm *algo, + int size, void **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_i2c *i2c = (void *)engine; + struct nouveau_i2c_port *port; + int ret; + + ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject); + port = *pobject; + if (ret) + return ret; + + snprintf(port->adapter.name, sizeof(port->adapter.name), + "nouveau-%s-%d", device->name, index); + port->adapter.owner = THIS_MODULE; + port->adapter.dev.parent = &device->pdev->dev; + port->index = index; + i2c_set_adapdata(&port->adapter, i2c); + + if ( algo == &nouveau_i2c_bit_algo && + !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) { + struct i2c_algo_bit_data *bit; + + bit = kzalloc(sizeof(*bit), GFP_KERNEL); + if (!bit) + return -ENOMEM; + + bit->udelay = 10; + bit->timeout = usecs_to_jiffies(2200); + bit->data = port; + bit->pre_xfer = nouveau_i2c_pre_xfer; + bit->setsda = nouveau_i2c_setsda; + bit->setscl = nouveau_i2c_setscl; + bit->getsda = nouveau_i2c_getsda; + bit->getscl = nouveau_i2c_getscl; + + port->adapter.algo_data = bit; + ret = i2c_bit_add_bus(&port->adapter); + } else { + port->adapter.algo_data = port; + port->adapter.algo = algo; + ret = 
i2c_add_adapter(&port->adapter); + } + + /* drop port's i2c subdev refcount, i2c handles this itself */ + if (ret == 0) + list_add_tail(&port->head, &i2c->ports); + return ret; +} + +/****************************************************************************** + * base i2c subdev class implementation + *****************************************************************************/ + +static struct nouveau_i2c_port * +nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index) +{ + struct nouveau_bios *bios = nouveau_bios(i2c); + struct nouveau_i2c_port *port; + + if (index == NV_I2C_DEFAULT(0) || + index == NV_I2C_DEFAULT(1)) { + u8 ver, hdr, cnt, len; + u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len); + if (i2c && ver >= 0x30) { + u8 auxidx = nv_ro08(bios, i2c + 4); + if (index == NV_I2C_DEFAULT(0)) + index = (auxidx & 0x0f) >> 0; + else + index = (auxidx & 0xf0) >> 4; + } else { + index = 2; + } + } + + list_for_each_entry(port, &i2c->ports, head) { + if (port->index == index) + return port; + } + + return NULL; +} + +static struct nouveau_i2c_port * +nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type) +{ + struct nouveau_i2c_port *port; + + list_for_each_entry(port, &i2c->ports, head) { + if (nv_hclass(port) == type) + return port; + } + + return NULL; +} + +static int +nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, + struct i2c_board_info *info, + bool (*match)(struct nouveau_i2c_port *, + struct i2c_board_info *)) +{ + struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); + int i; + + if (!port) { + nv_debug(i2c, "no bus when probing %s on %d\n", what, index); + return -ENODEV; + } + + nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index); + for (i = 0; info[i].addr; i++) { + if (nv_probe_i2c(port, info[i].addr) && + (!match || match(port, &info[i]))) { + nv_info(i2c, "detected %s: %s\n", what, info[i].type); + return i; + } + } + + nv_debug(i2c, "no devices found.\n"); + return -ENODEV; +} + +int +_nouveau_i2c_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_i2c *i2c = (void *)object; + struct nouveau_i2c_port *port; + int ret; + + list_for_each_entry(port, &i2c->ports, head) { + ret = nv_ofuncs(port)->fini(nv_object(port), suspend); + if (ret && suspend) + goto fail; + } + + return nouveau_subdev_fini(&i2c->base, suspend); +fail: + list_for_each_entry_continue_reverse(port, &i2c->ports, head) { + nv_ofuncs(port)->init(nv_object(port)); + } + + return ret; +} + +int +_nouveau_i2c_init(struct nouveau_object *object) +{ + struct nouveau_i2c *i2c = (void *)object; + struct nouveau_i2c_port *port; + int ret; + + ret = nouveau_subdev_init(&i2c->base); + if (ret == 0) { + list_for_each_entry(port, &i2c->ports, head) { + ret = nv_ofuncs(port)->init(nv_object(port)); + if (ret) + goto fail; + } + } + + return ret; +fail: + list_for_each_entry_continue_reverse(port, &i2c->ports, head) { + nv_ofuncs(port)->fini(nv_object(port), false); + } + + return ret; +} + +void +_nouveau_i2c_dtor(struct nouveau_object *object) +{ + struct nouveau_i2c *i2c = (void *)object; + struct nouveau_i2c_port *port, *temp; + + list_for_each_entry_safe(port, temp, &i2c->ports, head) { + nouveau_object_ref(NULL, (struct nouveau_object **)&port); + } + + nouveau_subdev_destroy(&i2c->base); +} + +static struct nouveau_oclass * +nouveau_i2c_extdev_sclass[] = { + nouveau_anx9805_sclass, +}; + +int +nouveau_i2c_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + struct nouveau_oclass *sclass, + int 
length, void **pobject) +{ + struct nouveau_bios *bios = nouveau_bios(parent); + struct nouveau_i2c *i2c; + struct nouveau_object *object; + struct dcb_i2c_entry info; + int ret, i, j, index = -1; + struct dcb_output outp; + u8 ver, hdr; + u32 data; + + ret = nouveau_subdev_create(parent, engine, oclass, 0, + "I2C", "i2c", &i2c); + *pobject = nv_object(i2c); + if (ret) + return ret; + + i2c->find = nouveau_i2c_find; + i2c->find_type = nouveau_i2c_find_type; + i2c->identify = nouveau_i2c_identify; + INIT_LIST_HEAD(&i2c->ports); + + while (!dcb_i2c_parse(bios, ++index, &info)) { + if (info.type == DCB_I2C_UNUSED) + continue; + + oclass = sclass; + do { + ret = -EINVAL; + if (oclass->handle == info.type) { + ret = nouveau_object_ctor(*pobject, *pobject, + oclass, &info, + index, &object); + } + } while (ret && (++oclass)->handle); + } + + /* in addition to the busses specified in the i2c table, there + * may be ddc/aux channels hiding behind external tmds/dp/etc + * transmitters. + */ + index = ((index + 0x0f) / 0x10) * 0x10; + i = -1; + while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) { + if (!outp.location || !outp.extdev) + continue; + + switch (outp.type) { + case DCB_OUTPUT_TMDS: + info.type = NV_I2C_TYPE_EXTDDC(outp.extdev); + break; + case DCB_OUTPUT_DP: + info.type = NV_I2C_TYPE_EXTAUX(outp.extdev); + break; + default: + continue; + } + + ret = -ENODEV; + j = -1; + while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) { + parent = nv_object(i2c->find(i2c, outp.i2c_index)); + oclass = nouveau_i2c_extdev_sclass[j]; + do { + if (oclass->handle != info.type) + continue; + ret = nouveau_object_ctor(parent, *pobject, + oclass, NULL, + index++, &object); + } while (ret && (++oclass)->handle); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c new file mode 100644 index 0000000..a6e72d3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c @@ -0,0 +1,232 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "subdev/i2c.h" + +#ifdef CONFIG_NOUVEAU_I2C_INTERNAL +#define T_TIMEOUT 2200000 +#define T_RISEFALL 1000 +#define T_HOLD 5000 + +static inline void +i2c_drive_scl(struct nouveau_i2c_port *port, int state) +{ + port->func->drive_scl(port, state); +} + +static inline void +i2c_drive_sda(struct nouveau_i2c_port *port, int state) +{ + port->func->drive_sda(port, state); +} + +static inline int +i2c_sense_scl(struct nouveau_i2c_port *port) +{ + return port->func->sense_scl(port); +} + +static inline int +i2c_sense_sda(struct nouveau_i2c_port *port) +{ + return port->func->sense_sda(port); +} + +static void +i2c_delay(struct nouveau_i2c_port *port, u32 nsec) +{ + udelay((nsec + 500) / 1000); +} + +static bool +i2c_raise_scl(struct nouveau_i2c_port *port) +{ + u32 timeout = T_TIMEOUT / T_RISEFALL; + + i2c_drive_scl(port, 1); + do { + i2c_delay(port, T_RISEFALL); + } while (!i2c_sense_scl(port) && --timeout); + + return timeout != 0; +} + +static int +i2c_start(struct nouveau_i2c_port *port) +{ + int ret = 0; + + if (!i2c_sense_scl(port) || + !i2c_sense_sda(port)) { + i2c_drive_scl(port, 0); + i2c_drive_sda(port, 1); + if (!i2c_raise_scl(port)) + ret = -EBUSY; + } + + i2c_drive_sda(port, 0); + i2c_delay(port, T_HOLD); + i2c_drive_scl(port, 0); + i2c_delay(port, T_HOLD); + return ret; +} + +static void +i2c_stop(struct nouveau_i2c_port *port) +{ + i2c_drive_scl(port, 0); + i2c_drive_sda(port, 0); + i2c_delay(port, T_RISEFALL); + + i2c_drive_scl(port, 1); + i2c_delay(port, T_HOLD); + i2c_drive_sda(port, 1); + i2c_delay(port, T_HOLD); +} + +static int +i2c_bitw(struct nouveau_i2c_port *port, int sda) +{ + i2c_drive_sda(port, sda); + i2c_delay(port, T_RISEFALL); + + if (!i2c_raise_scl(port)) + return -ETIMEDOUT; + i2c_delay(port, T_HOLD); + + i2c_drive_scl(port, 0); + i2c_delay(port, T_HOLD); + return 0; +} + +static int +i2c_bitr(struct nouveau_i2c_port *port) +{ + int sda; + + i2c_drive_sda(port, 1); + i2c_delay(port, T_RISEFALL); + + if (!i2c_raise_scl(port)) + return -ETIMEDOUT; + i2c_delay(port, T_HOLD); + + sda = i2c_sense_sda(port); + + i2c_drive_scl(port, 0); + i2c_delay(port, T_HOLD); + return sda; +} + +static int +i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last) +{ + int i, bit; + + *byte = 0; + for (i = 7; i >= 0; i--) { + bit = i2c_bitr(port); + if (bit < 0) + return bit; + *byte |= bit << i; + } + + return i2c_bitw(port, last ? 
1 : 0); +} + +static int +i2c_put_byte(struct nouveau_i2c_port *port, u8 byte) +{ + int i, ret; + for (i = 7; i >= 0; i--) { + ret = i2c_bitw(port, !!(byte & (1 << i))); + if (ret < 0) + return ret; + } + + ret = i2c_bitr(port); + if (ret == 1) /* nack */ + ret = -EIO; + return ret; +} + +static int +i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg) +{ + u32 addr = msg->addr << 1; + if (msg->flags & I2C_M_RD) + addr |= 1; + return i2c_put_byte(port, addr); +} + +static int +i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + struct nouveau_i2c_port *port = adap->algo_data; + struct i2c_msg *msg = msgs; + int ret = 0, mcnt = num; + + if (port->func->acquire) + port->func->acquire(port); + + while (!ret && mcnt--) { + u8 remaining = msg->len; + u8 *ptr = msg->buf; + + ret = i2c_start(port); + if (ret == 0) + ret = i2c_addr(port, msg); + + if (msg->flags & I2C_M_RD) { + while (!ret && remaining--) + ret = i2c_get_byte(port, ptr++, !remaining); + } else { + while (!ret && remaining--) + ret = i2c_put_byte(port, *ptr++); + } + + msg++; + } + + i2c_stop(port); + return (ret < 0) ? ret : num; +} +#else +static int +i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + return -ENODEV; +} +#endif + +static u32 +i2c_bit_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +const struct i2c_algorithm nouveau_i2c_bit_algo = { + .master_xfer = i2c_bit_xfer, + .functionality = i2c_bit_func +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c new file mode 100644 index 0000000..2ad1884 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c @@ -0,0 +1,143 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/i2c.h> +#include <subdev/vga.h> + +struct nv04_i2c_priv { + struct nouveau_i2c base; +}; + +struct nv04_i2c_port { + struct nouveau_i2c_port base; + u8 drive; + u8 sense; +}; + +static void +nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state) +{ + struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv04_i2c_port *port = (void *)base; + u8 val = nv_rdvgac(priv, 0, port->drive); + if (state) val |= 0x20; + else val &= 0xdf; + nv_wrvgac(priv, 0, port->drive, val | 0x01); +} + +static void +nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state) +{ + struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv04_i2c_port *port = (void *)base; + u8 val = nv_rdvgac(priv, 0, port->drive); + if (state) val |= 0x10; + else val &= 0xef; + nv_wrvgac(priv, 0, port->drive, val | 0x01); +} + +static int +nv04_i2c_sense_scl(struct nouveau_i2c_port *base) +{ + struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv04_i2c_port *port = (void *)base; + return !!(nv_rdvgac(priv, 0, port->sense) & 0x04); +} + +static int +nv04_i2c_sense_sda(struct nouveau_i2c_port *base) +{ + struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv04_i2c_port *port = (void *)base; + return !!(nv_rdvgac(priv, 0, port->sense) & 0x08); +} + +static const struct nouveau_i2c_func +nv04_i2c_func = { + .drive_scl = nv04_i2c_drive_scl, + .drive_sda = nv04_i2c_drive_sda, + .sense_scl = nv04_i2c_sense_scl, + .sense_sda = nv04_i2c_sense_sda, +}; + +static int +nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct dcb_i2c_entry *info = data; + struct nv04_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &nouveau_i2c_bit_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + port->base.func = &nv04_i2c_func; + port->drive = info->drive; + port->sense = info->sense; + return 0; +} + +static struct nouveau_oclass +nv04_i2c_sclass[] = { + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_i2c_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = _nouveau_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + {} +}; + +static int +nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_i2c_priv *priv; + int ret; + + ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nv04_i2c_oclass = { + .handle = NV_SUBDEV(I2C, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_i2c_ctor, + .dtor = _nouveau_i2c_dtor, + .init = _nouveau_i2c_init, + .fini = _nouveau_i2c_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c new file mode 100644 index 0000000..f501ae2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c @@ -0,0 +1,135 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/i2c.h> +#include <subdev/vga.h> + +struct nv4e_i2c_priv { + struct nouveau_i2c base; +}; + +struct nv4e_i2c_port { + struct nouveau_i2c_port base; + u32 addr; +}; + +static void +nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state) +{ + struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv4e_i2c_port *port = (void *)base; + nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01); +} + +static void +nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state) +{ + struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv4e_i2c_port *port = (void *)base; + nv_mask(priv, port->addr, 0x1f, state ? 
0x11 : 0x01); +} + +static int +nv4e_i2c_sense_scl(struct nouveau_i2c_port *base) +{ + struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv4e_i2c_port *port = (void *)base; + return !!(nv_rd32(priv, port->addr) & 0x00040000); +} + +static int +nv4e_i2c_sense_sda(struct nouveau_i2c_port *base) +{ + struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv4e_i2c_port *port = (void *)base; + return !!(nv_rd32(priv, port->addr) & 0x00080000); +} + +static const struct nouveau_i2c_func +nv4e_i2c_func = { + .drive_scl = nv4e_i2c_drive_scl, + .drive_sda = nv4e_i2c_drive_sda, + .sense_scl = nv4e_i2c_sense_scl, + .sense_sda = nv4e_i2c_sense_sda, +}; + +static int +nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct dcb_i2c_entry *info = data; + struct nv4e_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &nouveau_i2c_bit_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + port->base.func = &nv4e_i2c_func; + port->addr = 0x600800 + info->drive; + return 0; +} + +static struct nouveau_oclass +nv4e_i2c_sclass[] = { + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv4e_i2c_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = _nouveau_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + {} +}; + +static int +nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv4e_i2c_priv *priv; + int ret; + + ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nv4e_i2c_oclass = { + .handle = NV_SUBDEV(I2C, 0x4e), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv4e_i2c_ctor, + .dtor = _nouveau_i2c_dtor, + .init = _nouveau_i2c_init, + .fini = _nouveau_i2c_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c new file mode 100644 index 0000000..378dfa3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c @@ -0,0 +1,149 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "nv50.h" + +void +nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + if (state) port->state |= 0x01; + else port->state &= 0xfe; + nv_wr32(priv, port->addr, port->state); +} + +void +nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + if (state) port->state |= 0x02; + else port->state &= 0xfd; + nv_wr32(priv, port->addr, port->state); +} + +int +nv50_i2c_sense_scl(struct nouveau_i2c_port *base) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + return !!(nv_rd32(priv, port->addr) & 0x00000001); +} + +int +nv50_i2c_sense_sda(struct nouveau_i2c_port *base) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + return !!(nv_rd32(priv, port->addr) & 0x00000002); +} + +static const struct nouveau_i2c_func +nv50_i2c_func = { + .drive_scl = nv50_i2c_drive_scl, + .drive_sda = nv50_i2c_drive_sda, + .sense_scl = nv50_i2c_sense_scl, + .sense_sda = nv50_i2c_sense_sda, +}; + +const u32 nv50_i2c_addr[] = { + 0x00e138, 0x00e150, 0x00e168, 0x00e180, + 0x00e254, 0x00e274, 0x00e764, 0x00e780, + 0x00e79c, 0x00e7b8 +}; +const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr); + +static int +nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct dcb_i2c_entry *info = data; + struct nv50_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &nouveau_i2c_bit_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + if (info->drive >= nv50_i2c_addr_nr) + return -EINVAL; + + port->base.func = &nv50_i2c_func; + port->state = 0x00000007; + port->addr = nv50_i2c_addr[info->drive]; + return 0; +} + +int +nv50_i2c_port_init(struct nouveau_object *object) +{ + struct nv50_i2c_priv *priv = (void *)object->engine; + struct nv50_i2c_port *port = (void *)object; + nv_wr32(priv, port->addr, port->state); + return nouveau_i2c_port_init(&port->base); +} + +static struct nouveau_oclass +nv50_i2c_sclass[] = { + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_i2c_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = nv50_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + {} +}; + +static int +nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_i2c_priv *priv; + int ret; + + ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nv50_i2c_oclass = { + .handle = NV_SUBDEV(I2C, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_i2c_ctor, + .dtor = _nouveau_i2c_dtor, + .init = _nouveau_i2c_init, + .fini = _nouveau_i2c_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h new file mode 100644 index 0000000..4e5ba48 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h @@ -0,0 +1,32 @@ +#ifndef __NV50_I2C_H__ +#define __NV50_I2C_H__ + +#include 
<subdev/i2c.h> + +struct nv50_i2c_priv { + struct nouveau_i2c base; +}; + +struct nv50_i2c_port { + struct nouveau_i2c_port base; + u32 addr; + u32 ctrl; + u32 data; + u32 state; +}; + +extern const u32 nv50_i2c_addr[]; +extern const int nv50_i2c_addr_nr; +int nv50_i2c_port_init(struct nouveau_object *); +int nv50_i2c_sense_scl(struct nouveau_i2c_port *); +int nv50_i2c_sense_sda(struct nouveau_i2c_port *); +void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state); +void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state); + +int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +void nv94_i2c_acquire(struct nouveau_i2c_port *); +void nv94_i2c_release(struct nouveau_i2c_port *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c new file mode 100644 index 0000000..61b7716 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c @@ -0,0 +1,285 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nv50.h" + +#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args) +#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args) + +static void +auxch_fini(struct nouveau_i2c *aux, int ch) +{ + nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000); +} + +static int +auxch_init(struct nouveau_i2c *aux, int ch) +{ + const u32 unksel = 1; /* nfi which to use, or if it matters.. */ + const u32 ureq = unksel ? 0x00100000 : 0x00200000; + const u32 urep = unksel ? 0x01000000 : 0x02000000; + u32 ctrl, timeout; + + /* wait up to 1ms for any previous transaction to be done... 
*/ + timeout = 1000; + do { + ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); + udelay(1); + if (!timeout--) { + AUX_ERR("begin idle timeout 0x%08x\n", ctrl); + return -EBUSY; + } + } while (ctrl & 0x03010000); + + /* set some magic, and wait up to 1ms for it to appear */ + nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq); + timeout = 1000; + do { + ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); + udelay(1); + if (!timeout--) { + AUX_ERR("magic wait 0x%08x\n", ctrl); + auxch_fini(aux, ch); + return -EBUSY; + } + } while ((ctrl & 0x03000000) != urep); + + return 0; +} + +int +nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size) +{ + struct nouveau_i2c *aux = nouveau_i2c(base); + struct nv50_i2c_port *port = (void *)base; + u32 ctrl, stat, timeout, retries; + u32 xbuf[4] = {}; + int ch = port->addr; + int ret, i; + + AUX_DBG("%d: 0x%08x %d\n", type, addr, size); + + ret = auxch_init(aux, ch); + if (ret) + goto out; + + stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50)); + if (!(stat & 0x10000000)) { + AUX_DBG("sink not detected\n"); + ret = -ENXIO; + goto out; + } + + if (!(type & 1)) { + memcpy(xbuf, data, size); + for (i = 0; i < 16; i += 4) { + AUX_DBG("wr 0x%08x\n", xbuf[i / 4]); + nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]); + } + } + + ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); + ctrl &= ~0x0001f0ff; + ctrl |= type << 12; + ctrl |= size - 1; + nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr); + + /* retry transaction a number of times on failure... */ + ret = -EREMOTEIO; + for (retries = 0; retries < 32; retries++) { + /* reset, and delay a while if this is a retry */ + nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl); + nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl); + if (retries) + udelay(400); + + /* transaction request, wait up to 1ms for it to complete */ + nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl); + + timeout = 1000; + do { + ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); + udelay(1); + if (!timeout--) { + AUX_ERR("tx req timeout 0x%08x\n", ctrl); + goto out; + } + } while (ctrl & 0x00010000); + + /* read status, and check if transaction completed ok */ + stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0); + if (!(stat & 0x000f0f00)) { + ret = 0; + break; + } + + AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat); + } + + if (type & 1) { + for (i = 0; i < 16; i += 4) { + xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i); + AUX_DBG("rd 0x%08x\n", xbuf[i / 4]); + } + memcpy(data, xbuf, size); + } + +out: + auxch_fini(aux, ch); + return ret; +} + +void +nv94_i2c_acquire(struct nouveau_i2c_port *base) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + if (port->ctrl) { + nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000); + nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data); + } +} + +void +nv94_i2c_release(struct nouveau_i2c_port *base) +{ +} + +static const struct nouveau_i2c_func +nv94_i2c_func = { + .acquire = nv94_i2c_acquire, + .release = nv94_i2c_release, + .drive_scl = nv50_i2c_drive_scl, + .drive_sda = nv50_i2c_drive_sda, + .sense_scl = nv50_i2c_sense_scl, + .sense_sda = nv50_i2c_sense_sda, +}; + +static int +nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct dcb_i2c_entry *info = data; + struct nv50_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + 
&nouveau_i2c_bit_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + if (info->drive >= nv50_i2c_addr_nr) + return -EINVAL; + + port->base.func = &nv94_i2c_func; + port->state = 7; + port->addr = nv50_i2c_addr[info->drive]; + if (info->share != DCB_I2C_UNUSED) { + port->ctrl = 0x00e500 + (info->share * 0x50); + port->data = 0x0000e001; + } + return 0; +} + +static const struct nouveau_i2c_func +nv94_aux_func = { + .acquire = nv94_i2c_acquire, + .release = nv94_i2c_release, + .aux = nv94_aux, +}; + +int +nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct dcb_i2c_entry *info = data; + struct nv50_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &nouveau_i2c_aux_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + port->base.func = &nv94_aux_func; + port->addr = info->drive; + if (info->share != DCB_I2C_UNUSED) { + port->ctrl = 0x00e500 + (info->drive * 0x50); + port->data = 0x00002002; + } + + return 0; +} + +static struct nouveau_oclass +nv94_i2c_sclass[] = { + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv94_i2c_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = nv50_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv94_aux_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = _nouveau_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + {} +}; + +static int +nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_i2c_priv *priv; + int ret; + + ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nv94_i2c_oclass = { + .handle = NV_SUBDEV(I2C, 0x94), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv94_i2c_ctor, + .dtor = _nouveau_i2c_dtor, + .init = _nouveau_i2c_init, + .fini = _nouveau_i2c_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c new file mode 100644 index 0000000..f761b8a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c @@ -0,0 +1,124 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nv50.h" + +static int +nvd0_i2c_sense_scl(struct nouveau_i2c_port *base) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + return !!(nv_rd32(priv, port->addr) & 0x00000010); +} + +static int +nvd0_i2c_sense_sda(struct nouveau_i2c_port *base) +{ + struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine; + struct nv50_i2c_port *port = (void *)base; + return !!(nv_rd32(priv, port->addr) & 0x00000020); +} + +static const struct nouveau_i2c_func +nvd0_i2c_func = { + .acquire = nv94_i2c_acquire, + .release = nv94_i2c_release, + .drive_scl = nv50_i2c_drive_scl, + .drive_sda = nv50_i2c_drive_sda, + .sense_scl = nvd0_i2c_sense_scl, + .sense_sda = nvd0_i2c_sense_sda, +}; + +static int +nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 index, + struct nouveau_object **pobject) +{ + struct dcb_i2c_entry *info = data; + struct nv50_i2c_port *port; + int ret; + + ret = nouveau_i2c_port_create(parent, engine, oclass, index, + &nouveau_i2c_bit_algo, &port); + *pobject = nv_object(port); + if (ret) + return ret; + + port->base.func = &nvd0_i2c_func; + port->state = 0x00000007; + port->addr = 0x00d014 + (info->drive * 0x20); + if (info->share != DCB_I2C_UNUSED) { + port->ctrl = 0x00e500 + (info->share * 0x50); + port->data = 0x0000e001; + } + return 0; +} + +static struct nouveau_oclass +nvd0_i2c_sclass[] = { + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvd0_i2c_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = nv50_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv94_aux_port_ctor, + .dtor = _nouveau_i2c_port_dtor, + .init = _nouveau_i2c_port_init, + .fini = _nouveau_i2c_port_fini, + }, + }, + {} +}; + +static int +nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_i2c_priv *priv; + int ret; + + ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nvd0_i2c_oclass = { + .handle = NV_SUBDEV(I2C, 0xd0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvd0_i2c_ctor, + .dtor = _nouveau_i2c_dtor, + .init = _nouveau_i2c_init, + .fini = _nouveau_i2c_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c new file mode 100644 index 0000000..4e977ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c @@ -0,0 +1,123 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/ibus.h> + +struct nvc0_ibus_priv { + struct nouveau_ibus base; +}; + +static void +nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i) +{ + u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400)); + u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400)); + u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400)); + nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); + nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000); +} + +static void +nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i) +{ + u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400)); + u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400)); + u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400)); + nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); + nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000); +} + +static void +nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i) +{ + u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400)); + u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400)); + u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400)); + nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); + nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000); +} + +static void +nvc0_ibus_intr(struct nouveau_subdev *subdev) +{ + struct nvc0_ibus_priv *priv = (void *)subdev; + u32 intr0 = nv_rd32(priv, 0x121c58); + u32 intr1 = nv_rd32(priv, 0x121c5c); + u32 hubnr = nv_rd32(priv, 0x121c70); + u32 ropnr = nv_rd32(priv, 0x121c74); + u32 gpcnr = nv_rd32(priv, 0x121c78); + u32 i; + + for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) { + u32 stat = 0x00000100 << i; + if (intr0 & stat) { + nvc0_ibus_intr_hub(priv, i); + intr0 &= ~stat; + } + } + + for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) { + u32 stat = 0x00010000 << i; + if (intr0 & stat) { + nvc0_ibus_intr_rop(priv, i); + intr0 &= ~stat; + } + } + + for (i = 0; intr1 && i < gpcnr; i++) { + u32 stat = 0x00000001 << i; + if (intr1 & stat) { + nvc0_ibus_intr_gpc(priv, i); + intr1 &= ~stat; + } + } +} + +static int +nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_ibus_priv *priv; + int ret; + + ret = nouveau_ibus_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->intr = nvc0_ibus_intr; + return 0; +} 
+ +struct nouveau_oclass +nvc0_ibus_oclass = { + .handle = NV_SUBDEV(IBUS, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_ibus_ctor, + .dtor = _nouveau_ibus_dtor, + .init = _nouveau_ibus_init, + .fini = _nouveau_ibus_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c new file mode 100644 index 0000000..7120124 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c @@ -0,0 +1,123 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/ibus.h> + +struct nve0_ibus_priv { + struct nouveau_ibus base; +}; + +static void +nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i) +{ + u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800)); + u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800)); + u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800)); + nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); + nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000); +} + +static void +nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i) +{ + u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800)); + u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800)); + u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800)); + nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); + nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000); +} + +static void +nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i) +{ + u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800)); + u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800)); + u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800)); + nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat); + nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000); +} + +static void +nve0_ibus_intr(struct nouveau_subdev *subdev) +{ + struct nve0_ibus_priv *priv = (void *)subdev; + u32 intr0 = nv_rd32(priv, 0x120058); + u32 intr1 = nv_rd32(priv, 0x12005c); + u32 hubnr = nv_rd32(priv, 0x120070); + u32 ropnr = nv_rd32(priv, 0x120074); + u32 gpcnr = nv_rd32(priv, 0x120078); + u32 i; + + for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) { + u32 stat = 0x00000100 << i; + if (intr0 & stat) { + nve0_ibus_intr_hub(priv, i); + intr0 &= ~stat; + } + } + + for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) { + u32 stat = 0x00010000 << i; + if (intr0 & stat) { + nve0_ibus_intr_rop(priv, i); + intr0 &= ~stat; + } + } + + for (i = 0; intr1 && i < gpcnr; 
i++) { + u32 stat = 0x00000001 << i; + if (intr1 & stat) { + nve0_ibus_intr_gpc(priv, i); + intr1 &= ~stat; + } + } +} + +static int +nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nve0_ibus_priv *priv; + int ret; + + ret = nouveau_ibus_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + nv_subdev(priv)->intr = nve0_ibus_intr; + return 0; +} + +struct nouveau_oclass +nve0_ibus_oclass = { + .handle = NV_SUBDEV(IBUS, 0xe0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nve0_ibus_ctor, + .dtor = _nouveau_ibus_dtor, + .init = _nouveau_ibus_init, + .fini = _nouveau_ibus_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c new file mode 100644 index 0000000..6565f3d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c @@ -0,0 +1,154 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/instmem.h> + +int +nouveau_instobj_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + int length, void **pobject) +{ + struct nouveau_instmem *imem = (void *)engine; + struct nouveau_instobj *iobj; + int ret; + + ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS, + length, pobject); + iobj = *pobject; + if (ret) + return ret; + + mutex_lock(&imem->base.mutex); + list_add(&iobj->head, &imem->list); + mutex_unlock(&imem->base.mutex); + return 0; +} + +void +nouveau_instobj_destroy(struct nouveau_instobj *iobj) +{ + struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine); + + mutex_lock(&subdev->mutex); + list_del(&iobj->head); + mutex_unlock(&subdev->mutex); + + return nouveau_object_destroy(&iobj->base); +} + +void +_nouveau_instobj_dtor(struct nouveau_object *object) +{ + struct nouveau_instobj *iobj = (void *)object; + return nouveau_instobj_destroy(iobj); +} + +int +nouveau_instmem_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + int length, void **pobject) +{ + struct nouveau_instmem *imem; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, + "INSTMEM", "instmem", length, pobject); + imem = *pobject; + if (ret) + return ret; + + INIT_LIST_HEAD(&imem->list); + return 0; +} + +int +nouveau_instmem_init(struct nouveau_instmem *imem) +{ + struct nouveau_instobj *iobj; + int ret, i; + + ret = nouveau_subdev_init(&imem->base); + if (ret) + return ret; + + mutex_lock(&imem->base.mutex); + + list_for_each_entry(iobj, &imem->list, head) { + if (iobj->suspend) { + for (i = 0; i < iobj->size; i += 4) + nv_wo32(iobj, i, iobj->suspend[i / 4]); + vfree(iobj->suspend); + iobj->suspend = NULL; + } + } + + mutex_unlock(&imem->base.mutex); + + return 0; +} + +int +nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend) +{ + struct nouveau_instobj *iobj; + int i, ret = 0; + + if (suspend) { + mutex_lock(&imem->base.mutex); + + list_for_each_entry(iobj, &imem->list, head) { + iobj->suspend = vmalloc(iobj->size); + if (!iobj->suspend) { + ret = -ENOMEM; + break; + } + + for (i = 0; i < iobj->size; i += 4) + iobj->suspend[i / 4] = nv_ro32(iobj, i); + } + + mutex_unlock(&imem->base.mutex); + + if (ret) + return ret; + } + + return nouveau_subdev_fini(&imem->base, suspend); +} + +int +_nouveau_instmem_init(struct nouveau_object *object) +{ + struct nouveau_instmem *imem = (void *)object; + return nouveau_instmem_init(imem); +} + +int +_nouveau_instmem_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_instmem *imem = (void *)object; + return nouveau_instmem_fini(imem, suspend); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c new file mode 100644 index 0000000..795393d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c @@ -0,0 +1,190 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/fb.h> + +#include "nv04.h" + +static int +nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_instmem_priv *priv = (void *)engine; + struct nv04_instobj_priv *node; + int ret, align; + + align = (unsigned long)data; + if (!align) + align = 1; + + ret = nouveau_instobj_create(parent, engine, oclass, &node); + *pobject = nv_object(node); + if (ret) + return ret; + + ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem); + if (ret) + return ret; + + node->base.addr = node->mem->offset; + node->base.size = node->mem->length; + return 0; +} + +static void +nv04_instobj_dtor(struct nouveau_object *object) +{ + struct nv04_instmem_priv *priv = (void *)object->engine; + struct nv04_instobj_priv *node = (void *)object; + nouveau_mm_free(&priv->heap, &node->mem); + nouveau_instobj_destroy(&node->base); +} + +static u32 +nv04_instobj_rd32(struct nouveau_object *object, u64 addr) +{ + struct nv04_instobj_priv *node = (void *)object; + return nv_ro32(object->engine, node->mem->offset + addr); +} + +static void +nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nv04_instobj_priv *node = (void *)object; + nv_wo32(object->engine, node->mem->offset + addr, data); +} + +static struct nouveau_oclass +nv04_instobj_oclass = { + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_instobj_ctor, + .dtor = nv04_instobj_dtor, + .init = _nouveau_instobj_init, + .fini = _nouveau_instobj_fini, + .rd32 = nv04_instobj_rd32, + .wr32 = nv04_instobj_wr32, + }, +}; + +int +nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent, + u32 size, u32 align, struct nouveau_object **pobject) +{ + struct nouveau_object *engine = nv_object(imem); + int ret; + + ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass, + (void *)(unsigned long)align, size, pobject); + if (ret) + return ret; + + return 0; +} + +static int +nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_instmem_priv *priv; + int ret; + + ret = nouveau_instmem_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + /* PRAMIN aperture maps over the end of 
VRAM, reserve it */ + priv->base.reserved = 512 * 1024; + priv->base.alloc = nv04_instmem_alloc; + + ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1); + if (ret) + return ret; + + /* 0x00000-0x10000: reserve for probable vbios image */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0, + &priv->vbios); + if (ret) + return ret; + + /* 0x10000-0x18000: reserve for RAMHT */ + ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht); + if (ret) + return ret; + + /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc); + if (ret) + return ret; + + /* 0x18800-0x18a00: reserve for RAMRO */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0, + &priv->ramro); + if (ret) + return ret; + + return 0; +} + +void +nv04_instmem_dtor(struct nouveau_object *object) +{ + struct nv04_instmem_priv *priv = (void *)object; + nouveau_gpuobj_ref(NULL, &priv->ramfc); + nouveau_gpuobj_ref(NULL, &priv->ramro); + nouveau_ramht_ref(NULL, &priv->ramht); + nouveau_gpuobj_ref(NULL, &priv->vbios); + nouveau_mm_fini(&priv->heap); + if (priv->iomem) + iounmap(priv->iomem); + nouveau_instmem_destroy(&priv->base); +} + +static u32 +nv04_instmem_rd32(struct nouveau_object *object, u64 addr) +{ + return nv_rd32(object, 0x700000 + addr); +} + +static void +nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + return nv_wr32(object, 0x700000 + addr, data); +} + +struct nouveau_oclass +nv04_instmem_oclass = { + .handle = NV_SUBDEV(INSTMEM, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_instmem_ctor, + .dtor = nv04_instmem_dtor, + .init = _nouveau_instmem_init, + .fini = _nouveau_instmem_fini, + .rd32 = nv04_instmem_rd32, + .wr32 = nv04_instmem_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h new file mode 100644 index 0000000..b15b613 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h @@ -0,0 +1,38 @@ +#ifndef __NV04_INSTMEM_H__ +#define __NV04_INSTMEM_H__ + +#include <core/gpuobj.h> +#include <core/ramht.h> +#include <core/mm.h> + +#include <subdev/instmem.h> + +struct nv04_instmem_priv { + struct nouveau_instmem base; + + void __iomem *iomem; + struct nouveau_mm heap; + + struct nouveau_gpuobj *vbios; + struct nouveau_ramht *ramht; + struct nouveau_gpuobj *ramro; + struct nouveau_gpuobj *ramfc; +}; + +static inline struct nv04_instmem_priv * +nv04_instmem(void *obj) +{ + return (void *)nouveau_instmem(obj); +} + +struct nv04_instobj_priv { + struct nouveau_instobj base; + struct nouveau_mm_node *mem; +}; + +void nv04_instmem_dtor(struct nouveau_object *); + +int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *, + u32 size, u32 align, struct nouveau_object **pobject); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c new file mode 100644 index 0000000..716bf41 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c @@ -0,0 +1,140 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nv04.h" + +static inline int +nv44_graph_class(struct nv04_instmem_priv *priv) +{ + if ((nv_device(priv)->chipset & 0xf0) == 0x60) + return 1; + return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f))); +} + +static int +nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct pci_dev *pdev = device->pdev; + struct nv04_instmem_priv *priv; + int ret, bar, vs; + + ret = nouveau_instmem_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + /* map bar */ + if (pci_resource_len(pdev, 2)) + bar = 2; + else + bar = 3; + + priv->iomem = ioremap(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); + if (!priv->iomem) { + nv_error(priv, "unable to map PRAMIN BAR\n"); + return -EFAULT; + } + + /* PRAMIN aperture maps over the end of vram, reserve enough space + * to fit graphics contexts for every channel, the magics come + * from engine/graph/nv40.c + */ + vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8); + if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs; + else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs; + else if (nv44_graph_class(priv)) priv->base.reserved = 0x4980 * vs; + else priv->base.reserved = 0x4a40 * vs; + priv->base.reserved += 16 * 1024; + priv->base.reserved *= 32; /* per-channel */ + priv->base.reserved += 512 * 1024; /* pci(e)gart table */ + priv->base.reserved += 512 * 1024; /* object storage */ + + priv->base.reserved = round_up(priv->base.reserved, 4096); + priv->base.alloc = nv04_instmem_alloc; + + ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1); + if (ret) + return ret; + + /* 0x00000-0x10000: reserve for probable vbios image */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0, + &priv->vbios); + if (ret) + return ret; + + /* 0x10000-0x18000: reserve for RAMHT */ + ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, + &priv->ramht); + if (ret) + return ret; + + /* 0x18000-0x18200: reserve for RAMRO + * 0x18200-0x20000: padding + */ + ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0, + &priv->ramro); + if (ret) + return ret; + + /* 0x20000-0x21000: reserve for RAMFC + * 0x21000-0x40000: padding and some unknown crap + */ + ret = 
nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0, + NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc); + if (ret) + return ret; + + return 0; +} + +static u32 +nv40_instmem_rd32(struct nouveau_object *object, u64 addr) +{ + struct nv04_instmem_priv *priv = (void *)object; + return ioread32_native(priv->iomem + addr); +} + +static void +nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + struct nv04_instmem_priv *priv = (void *)object; + iowrite32_native(data, priv->iomem + addr); +} + +struct nouveau_oclass +nv40_instmem_oclass = { + .handle = NV_SUBDEV(INSTMEM, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_instmem_ctor, + .dtor = nv04_instmem_dtor, + .init = _nouveau_instmem_init, + .fini = _nouveau_instmem_fini, + .rd32 = nv40_instmem_rd32, + .wr32 = nv40_instmem_wr32, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c new file mode 100644 index 0000000..cfc7e31 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c @@ -0,0 +1,172 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/instmem.h> +#include <subdev/fb.h> + +#include <core/mm.h> + +struct nv50_instmem_priv { + struct nouveau_instmem base; + spinlock_t lock; + u64 addr; +}; + +struct nv50_instobj_priv { + struct nouveau_instobj base; + struct nouveau_mem *mem; +}; + +static int +nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_fb *pfb = nouveau_fb(parent); + struct nv50_instobj_priv *node; + u32 align = (unsigned long)data; + int ret; + + size = max((size + 4095) & ~4095, (u32)4096); + align = max((align + 4095) & ~4095, (u32)4096); + + ret = nouveau_instobj_create(parent, engine, oclass, &node); + *pobject = nv_object(node); + if (ret) + return ret; + + ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem); + if (ret) + return ret; + + node->base.addr = node->mem->offset; + node->base.size = node->mem->size << 12; + node->mem->page_shift = 12; + return 0; +} + +static void +nv50_instobj_dtor(struct nouveau_object *object) +{ + struct nv50_instobj_priv *node = (void *)object; + struct nouveau_fb *pfb = nouveau_fb(object); + pfb->ram.put(pfb, &node->mem); + nouveau_instobj_destroy(&node->base); +} + +static u32 +nv50_instobj_rd32(struct nouveau_object *object, u64 offset) +{ + struct nv50_instmem_priv *priv = (void *)object->engine; + struct nv50_instobj_priv *node = (void *)object; + unsigned long flags; + u64 base = (node->mem->offset + offset) & 0xffffff00000ULL; + u64 addr = (node->mem->offset + offset) & 0x000000fffffULL; + u32 data; + + spin_lock_irqsave(&priv->lock, flags); + if (unlikely(priv->addr != base)) { + nv_wr32(priv, 0x001700, base >> 16); + priv->addr = base; + } + data = nv_rd32(priv, 0x700000 + addr); + spin_unlock_irqrestore(&priv->lock, flags); + return data; +} + +static void +nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data) +{ + struct nv50_instmem_priv *priv = (void *)object->engine; + struct nv50_instobj_priv *node = (void *)object; + unsigned long flags; + u64 base = (node->mem->offset + offset) & 0xffffff00000ULL; + u64 addr = (node->mem->offset + offset) & 0x000000fffffULL; + + spin_lock_irqsave(&priv->lock, flags); + if (unlikely(priv->addr != base)) { + nv_wr32(priv, 0x001700, base >> 16); + priv->addr = base; + } + nv_wr32(priv, 0x700000 + addr, data); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static struct nouveau_oclass +nv50_instobj_oclass = { + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_instobj_ctor, + .dtor = nv50_instobj_dtor, + .init = _nouveau_instobj_init, + .fini = _nouveau_instobj_fini, + .rd32 = nv50_instobj_rd32, + .wr32 = nv50_instobj_wr32, + }, +}; + +static int +nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent, + u32 size, u32 align, struct nouveau_object **pobject) +{ + struct nouveau_object *engine = nv_object(imem); + return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass, + (void *)(unsigned long)align, size, pobject); +} + +static int +nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_instmem_priv *priv; + int ret; + + ret = nouveau_instmem_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + spin_lock_init(&priv->lock); + priv->base.alloc = nv50_instmem_alloc; + return 0; +} + +static int 
+nv50_instmem_fini(struct nouveau_object *object, bool suspend) +{ + struct nv50_instmem_priv *priv = (void *)object; + priv->addr = ~0ULL; + return nouveau_instmem_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv50_instmem_oclass = { + .handle = NV_SUBDEV(INSTMEM, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_instmem_ctor, + .dtor = _nouveau_instmem_dtor, + .init = _nouveau_instmem_init, + .fini = nv50_instmem_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c new file mode 100644 index 0000000..fb794e9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c @@ -0,0 +1,221 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/ltcg.h> +#include <subdev/fb.h> +#include <subdev/timer.h> + +struct nvc0_ltcg_priv { + struct nouveau_ltcg base; + u32 part_nr; + u32 subp_nr; + struct nouveau_mm tags; + u32 num_tags; + struct nouveau_mm_node *tag_ram; +}; + +static void +nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp) +{ + u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400); + u32 stat = nv_rd32(priv, subp_base + 0x020); + + if (stat) { + nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat); + nv_wr32(priv, subp_base + 0x020, stat); + } +} + +static void +nvc0_ltcg_intr(struct nouveau_subdev *subdev) +{ + struct nvc0_ltcg_priv *priv = (void *)subdev; + u32 units; + + units = nv_rd32(priv, 0x00017c); + while (units) { + u32 subp, unit = ffs(units) - 1; + for (subp = 0; subp < priv->subp_nr; subp++) + nvc0_ltcg_subp_isr(priv, unit, subp); + units &= ~(1 << unit); + } + + /* we do something horribly wrong and upset PMFB a lot, so mask off + * interrupts from it after the first one until it's fixed + */ + nv_mask(priv, 0x000640, 0x02000000, 0x00000000); +} + +static int +nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n, + struct nouveau_mm_node **pnode) +{ + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; + int ret; + + ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); + if (ret) + *pnode = NULL; + + return ret; +} + +static void +nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode) +{ + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; + + nouveau_mm_free(&priv->tags, pnode); +} + +static void +nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) +{ + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; + u32 last = first + count - 1; + int p, i; + + BUG_ON((first > last) || (last >= priv->num_tags)); + + nv_wr32(priv, 0x17e8cc, first); + nv_wr32(priv, 0x17e8d0, last); + nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */ + + /* wait until it's finished with clearing */ + for (p = 0; p < priv->part_nr; ++p) { + for (i = 0; i < priv->subp_nr; ++i) + nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); + } +} + +/* TODO: Figure out tag memory details and drop the over-cautious allocation. + */ +static int +nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) +{ + u32 tag_size, tag_margin, tag_align; + int ret; + + nv_wr32(priv, 0x17e8d8, priv->part_nr); + if (nv_device(pfb)->card_type >= NV_E0) + nv_wr32(priv, 0x17e000, priv->part_nr); + + /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ + priv->num_tags = (pfb->ram.size >> 17) / 4; + if (priv->num_tags > (1 << 17)) + priv->num_tags = 1 << 17; /* we have 17 bits in PTE */ + priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */ + + tag_align = priv->part_nr * 0x800; + tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align; + + /* 4 part 4 sub: 0x2000 bytes for 56 tags */ + /* 3 part 4 sub: 0x6000 bytes for 168 tags */ + /* + * About 147 bytes per tag. Let's be safe and allocate x2, which makes + * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags. + * + * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %. 
+ */ + tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin; + tag_size += tag_align; + tag_size = (tag_size + 0xfff) >> 12; /* round up */ + + ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, + &priv->tag_ram); + if (ret) { + priv->num_tags = 0; + } else { + u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin; + + tag_base += tag_align - 1; + ret = do_div(tag_base, tag_align); + + nv_wr32(priv, 0x17e8d4, tag_base); + } + ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); + + return ret; +} + +static int +nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_ltcg_priv *priv; + struct nouveau_fb *pfb = nouveau_fb(parent); + u32 parts, mask; + int ret, i; + + ret = nouveau_ltcg_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + parts = nv_rd32(priv, 0x022438); + mask = nv_rd32(priv, 0x022554); + for (i = 0; i < parts; i++) { + if (!(mask & (1 << i))) + priv->part_nr++; + } + priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; + + nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ + + ret = nvc0_ltcg_init_tag_ram(pfb, priv); + if (ret) + return ret; + + priv->base.tags_alloc = nvc0_ltcg_tags_alloc; + priv->base.tags_free = nvc0_ltcg_tags_free; + priv->base.tags_clear = nvc0_ltcg_tags_clear; + + nv_subdev(priv)->intr = nvc0_ltcg_intr; + return 0; +} + +static void +nvc0_ltcg_dtor(struct nouveau_object *object) +{ + struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; + struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent); + + nouveau_mm_fini(&priv->tags); + nouveau_mm_free(&pfb->vram, &priv->tag_ram); + + nouveau_ltcg_destroy(ltcg); +} + +struct nouveau_oclass +nvc0_ltcg_oclass = { + .handle = NV_SUBDEV(LTCG, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_ltcg_ctor, + .dtor = nvc0_ltcg_dtor, + .init = _nouveau_ltcg_init, + .fini = _nouveau_ltcg_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c new file mode 100644 index 0000000..ec9cd6f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -0,0 +1,105 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mc.h> + +static irqreturn_t +nouveau_mc_intr(int irq, void *arg) +{ + struct nouveau_mc *pmc = arg; + const struct nouveau_mc_intr *map = pmc->intr_map; + struct nouveau_subdev *unit; + u32 stat, intr; + + intr = stat = nv_rd32(pmc, 0x000100); + while (stat && map->stat) { + if (stat & map->stat) { + unit = nouveau_subdev(pmc, map->unit); + if (unit && unit->intr) + unit->intr(unit); + intr &= ~map->stat; + } + map++; + } + + if (intr) { + nv_error(pmc, "unknown intr 0x%08x\n", stat); + } + + return stat ? IRQ_HANDLED : IRQ_NONE; +} + +int +_nouveau_mc_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_mc *pmc = (void *)object; + nv_wr32(pmc, 0x000140, 0x00000000); + return nouveau_subdev_fini(&pmc->base, suspend); +} + +int +_nouveau_mc_init(struct nouveau_object *object) +{ + struct nouveau_mc *pmc = (void *)object; + int ret = nouveau_subdev_init(&pmc->base); + if (ret) + return ret; + nv_wr32(pmc, 0x000140, 0x00000001); + return 0; +} + +void +_nouveau_mc_dtor(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nouveau_mc *pmc = (void *)object; + free_irq(device->pdev->irq, pmc); + nouveau_subdev_destroy(&pmc->base); +} + +int +nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, + const struct nouveau_mc_intr *intr_map, + int length, void **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_mc *pmc; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC", + "master", length, pobject); + pmc = *pobject; + if (ret) + return ret; + + pmc->intr_map = intr_map; + + ret = request_irq(device->pdev->irq, nouveau_mc_intr, + IRQF_SHARED, "nouveau", pmc); + if (ret < 0) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c new file mode 100644 index 0000000..64aa4ed --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c @@ -0,0 +1,81 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mc.h> + +struct nv04_mc_priv { + struct nouveau_mc base; +}; + +const struct nouveau_mc_intr +nv04_mc_intr[] = { + { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */ + { 0x00000100, NVDEV_ENGINE_FIFO }, + { 0x00001000, NVDEV_ENGINE_GR }, + { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */ + { 0x00100000, NVDEV_SUBDEV_TIMER }, + { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */ + { 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */ + { 0x10000000, NVDEV_SUBDEV_BUS }, + { 0x80000000, NVDEV_ENGINE_SW }, + {} +}; + +static int +nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_mc_priv *priv; + int ret; + + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +int +nv04_mc_init(struct nouveau_object *object) +{ + struct nv04_mc_priv *priv = (void *)object; + + nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */ + nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */ + + return nouveau_mc_init(&priv->base); +} + +struct nouveau_oclass +nv04_mc_oclass = { + .handle = NV_SUBDEV(MC, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv04_mc_init, + .fini = _nouveau_mc_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c new file mode 100644 index 0000000..d989178 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c @@ -0,0 +1,72 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mc.h> + +struct nv44_mc_priv { + struct nouveau_mc base; +}; + +static int +nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv44_mc_priv *priv; + int ret; + + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +static int +nv44_mc_init(struct nouveau_object *object) +{ + struct nv44_mc_priv *priv = (void *)object; + u32 tmp = nv_rd32(priv, 0x10020c); + + nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */ + + nv_wr32(priv, 0x001700, tmp); + nv_wr32(priv, 0x001704, 0); + nv_wr32(priv, 0x001708, 0); + nv_wr32(priv, 0x00170c, tmp); + + return nouveau_mc_init(&priv->base); +} + +struct nouveau_oclass +nv44_mc_oclass = { + .handle = NV_SUBDEV(MC, 0x44), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv44_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv44_mc_init, + .fini = _nouveau_mc_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c new file mode 100644 index 0000000..732d810 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c @@ -0,0 +1,80 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mc.h> + +struct nv50_mc_priv { + struct nouveau_mc base; +}; + +static const struct nouveau_mc_intr +nv50_mc_intr[] = { + { 0x00000001, NVDEV_ENGINE_MPEG }, + { 0x00000100, NVDEV_ENGINE_FIFO }, + { 0x00001000, NVDEV_ENGINE_GR }, + { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84- */ + { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */ + { 0x00100000, NVDEV_SUBDEV_TIMER }, + { 0x00200000, NVDEV_SUBDEV_GPIO }, + { 0x04000000, NVDEV_ENGINE_DISP }, + { 0x10000000, NVDEV_SUBDEV_BUS }, + { 0x80000000, NVDEV_ENGINE_SW }, + { 0x0000d101, NVDEV_SUBDEV_FB }, + {}, +}; + +static int +nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_mc_priv *priv; + int ret; + + ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +int +nv50_mc_init(struct nouveau_object *object) +{ + struct nv50_mc_priv *priv = (void *)object; + nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */ + return nouveau_mc_init(&priv->base); +} + +struct nouveau_oclass +nv50_mc_oclass = { + .handle = NV_SUBDEV(MC, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv50_mc_init, + .fini = _nouveau_mc_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c new file mode 100644 index 0000000..0d57b4d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -0,0 +1,74 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mc.h> + +struct nv98_mc_priv { + struct nouveau_mc base; +}; + +static const struct nouveau_mc_intr +nv98_mc_intr[] = { + { 0x00000001, NVDEV_ENGINE_PPP }, + { 0x00000100, NVDEV_ENGINE_FIFO }, + { 0x00001000, NVDEV_ENGINE_GR }, + { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */ + { 0x00008000, NVDEV_ENGINE_BSP }, + { 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */ + { 0x00100000, NVDEV_SUBDEV_TIMER }, + { 0x00200000, NVDEV_SUBDEV_GPIO }, + { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */ + { 0x04000000, NVDEV_ENGINE_DISP }, + { 0x10000000, NVDEV_SUBDEV_BUS }, + { 0x80000000, NVDEV_ENGINE_SW }, + { 0x0040d101, NVDEV_SUBDEV_FB }, + {}, +}; + +static int +nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv98_mc_priv *priv; + int ret; + + ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nv98_mc_oclass = { + .handle = NV_SUBDEV(MC, 0x98), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv98_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv50_mc_init, + .fini = _nouveau_mc_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c new file mode 100644 index 0000000..4c97cd2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -0,0 +1,76 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mc.h> + +struct nvc0_mc_priv { + struct nouveau_mc base; +}; + +static const struct nouveau_mc_intr +nvc0_mc_intr[] = { + { 0x00000001, NVDEV_ENGINE_PPP }, + { 0x00000020, NVDEV_ENGINE_COPY0 }, + { 0x00000040, NVDEV_ENGINE_COPY1 }, + { 0x00000100, NVDEV_ENGINE_FIFO }, + { 0x00001000, NVDEV_ENGINE_GR }, + { 0x00008000, NVDEV_ENGINE_BSP }, + { 0x00040000, NVDEV_SUBDEV_THERM }, + { 0x00020000, NVDEV_ENGINE_VP }, + { 0x00100000, NVDEV_SUBDEV_TIMER }, + { 0x00200000, NVDEV_SUBDEV_GPIO }, + { 0x02000000, NVDEV_SUBDEV_LTCG }, + { 0x04000000, NVDEV_ENGINE_DISP }, + { 0x10000000, NVDEV_SUBDEV_BUS }, + { 0x40000000, NVDEV_SUBDEV_IBUS }, + { 0x80000000, NVDEV_ENGINE_SW }, + {}, +}; + +static int +nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_mc_priv *priv; + int ret; + + ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + return 0; +} + +struct nouveau_oclass +nvc0_mc_oclass = { + .handle = NV_SUBDEV(MC, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv50_mc_init, + .fini = _nouveau_mc_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c new file mode 100644 index 0000000..29811ee --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c @@ -0,0 +1,290 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/option.h> + +#include <subdev/i2c.h> +#include <subdev/mxm.h> +#include <subdev/bios.h> +#include <subdev/bios/mxm.h> + +#include "mxms.h" + +static bool +mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr, + u8 offset, u8 size, u8 *data) +{ + struct i2c_msg msgs[] = { + { .addr = addr, .flags = 0, .len = 1, .buf = &offset }, + { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, }, + }; + + return i2c_transfer(&i2c->adapter, msgs, 2) == 2; +} + +static bool +mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version) +{ + struct nouveau_bios *bios = nouveau_bios(mxm); + struct nouveau_i2c *i2c = nouveau_i2c(mxm); + struct nouveau_i2c_port *port = NULL; + u8 i2cidx, mxms[6], addr, size; + + i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f; + if (i2cidx < 0x0f) + port = i2c->find(i2c, i2cidx); + if (!port) + return false; + + addr = 0x54; + if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) { + addr = 0x56; + if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) + return false; + } + + mxm->mxms = mxms; + size = mxms_headerlen(mxm) + mxms_structlen(mxm); + mxm->mxms = kmalloc(size, GFP_KERNEL); + + if (mxm->mxms && + mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms)) + return true; + + kfree(mxm->mxms); + mxm->mxms = NULL; + return false; +} + +#if defined(CONFIG_ACPI) +static bool +mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version) +{ + struct nouveau_device *device = nv_device(mxm); + static char muid[] = { + 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, + 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65 + }; + u32 mxms_args[] = { 0x00000000 }; + union acpi_object args[4] = { + /* _DSM MUID */ + { .buffer.type = 3, + .buffer.length = sizeof(muid), + .buffer.pointer = muid, + }, + /* spec says this can be zero to mean "highest revision", but + * of course there's at least one bios out there which fails + * unless you pass in exactly the version it supports.. 
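+ * e.g. a packed version byte of 0x30 (v3.0) becomes the integer
+ * 0x0300 below, and 0x25 (v2.5) becomes 0x0205 (illustrative values
+ * only, derived from the expression that follows).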
+ */ + { .integer.type = ACPI_TYPE_INTEGER, + .integer.value = (version & 0xf0) << 4 | (version & 0x0f), + }, + /* MXMS function */ + { .integer.type = ACPI_TYPE_INTEGER, + .integer.value = 0x00000010, + }, + /* Pointer to MXMS arguments */ + { .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = sizeof(mxms_args), + .buffer.pointer = (char *)mxms_args, + }, + }; + struct acpi_object_list list = { ARRAY_SIZE(args), args }; + struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_handle handle; + int ret; + + handle = DEVICE_ACPI_HANDLE(&device->pdev->dev); + if (!handle) + return false; + + ret = acpi_evaluate_object(handle, "_DSM", &list, &retn); + if (ret) { + nv_debug(mxm, "DSM MXMS failed: %d\n", ret); + return false; + } + + obj = retn.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + mxm->mxms = kmemdup(obj->buffer.pointer, + obj->buffer.length, GFP_KERNEL); + } else + if (obj->type == ACPI_TYPE_INTEGER) { + nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value); + } + + kfree(obj); + return mxm->mxms != NULL; +} +#endif + +#if defined(CPTCFG_ACPI_WMI) || defined(CPTCFG_ACPI_WMI_MODULE) + +#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0" + +static u8 +wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version) +{ + u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 }; + struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args }; + struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + + status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); + if (ACPI_FAILURE(status)) { + nv_debug(mxm, "WMMX MXMI returned %d\n", status); + return 0x00; + } + + obj = retn.pointer; + if (obj->type == ACPI_TYPE_INTEGER) { + version = obj->integer.value; + nv_debug(mxm, "WMMX MXMI version %d.%d\n", + (version >> 4), version & 0x0f); + } else { + version = 0; + nv_debug(mxm, "WMMX MXMI returned non-integer\n"); + } + + kfree(obj); + return version; +} + +static bool +mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version) +{ + u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 }; + struct acpi_buffer args = { sizeof(mxms_args), mxms_args }; + struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + acpi_status status; + + if (!wmi_has_guid(WMI_WMMX_GUID)) { + nv_debug(mxm, "WMMX GUID not found\n"); + return false; + } + + mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00); + if (!mxms_args[1]) + mxms_args[1] = wmi_wmmx_mxmi(mxm, version); + if (!mxms_args[1]) + return false; + + status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn); + if (ACPI_FAILURE(status)) { + nv_debug(mxm, "WMMX MXMS returned %d\n", status); + return false; + } + + obj = retn.pointer; + if (obj->type == ACPI_TYPE_BUFFER) { + mxm->mxms = kmemdup(obj->buffer.pointer, + obj->buffer.length, GFP_KERNEL); + } + + kfree(obj); + return mxm->mxms != NULL; +} +#endif + +static struct mxm_shadow_h { + const char *name; + bool (*exec)(struct nouveau_mxm *, u8 version); +} _mxm_shadow[] = { + { "ROM", mxm_shadow_rom }, +#if defined(CONFIG_ACPI) + { "DSM", mxm_shadow_dsm }, +#endif +#if defined(CPTCFG_ACPI_WMI) || defined(CPTCFG_ACPI_WMI_MODULE) + { "WMI", mxm_shadow_wmi }, +#endif + {} +}; + +static int +mxm_shadow(struct nouveau_mxm *mxm, u8 version) +{ + struct mxm_shadow_h *shadow = _mxm_shadow; + do { + nv_debug(mxm, "checking %s\n", shadow->name); + if (shadow->exec(mxm, version)) { + if (mxms_valid(mxm)) + return 0; + kfree(mxm->mxms); + mxm->mxms = NULL; + } + } while ((++shadow)->name); + return 
-ENOENT; +} + +int +nouveau_mxm_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, int length, void **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nouveau_bios *bios = nouveau_bios(device); + struct nouveau_mxm *mxm; + u8 ver, len; + u16 data; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm", + length, pobject); + mxm = *pobject; + if (ret) + return ret; + + data = mxm_table(bios, &ver, &len); + if (!data || !(ver = nv_ro08(bios, data))) { + nv_debug(mxm, "no VBIOS data, nothing to do\n"); + return 0; + } + + nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f); + + if (mxm_shadow(mxm, ver)) { + nv_info(mxm, "failed to locate valid SIS\n"); +#if 0 + /* we should, perhaps, fall back to some kind of limited + * mode here if the x86 vbios hasn't already done the + * work for us (so we prevent loading with completely + * whacked vbios tables). + */ + return -EINVAL; +#else + return 0; +#endif + } + + nv_info(mxm, "MXMS Version %d.%d\n", + mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff); + mxms_foreach(mxm, 0, NULL, NULL); + + if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true)) + mxm->action |= MXM_SANITISE_DCB; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c new file mode 100644 index 0000000..4bde7f7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c @@ -0,0 +1,193 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mxm.h> +#include "mxms.h" + +#define ROM16(x) le16_to_cpu(*(u16 *)&(x)) +#define ROM32(x) le32_to_cpu(*(u32 *)&(x)) + +static u8 * +mxms_data(struct nouveau_mxm *mxm) +{ + return mxm->mxms; + +} + +u16 +mxms_version(struct nouveau_mxm *mxm) +{ + u8 *mxms = mxms_data(mxm); + u16 version = (mxms[4] << 8) | mxms[5]; + switch (version ) { + case 0x0200: + case 0x0201: + case 0x0300: + return version; + default: + break; + } + + nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]); + return 0x0000; +} + +u16 +mxms_headerlen(struct nouveau_mxm *mxm) +{ + return 8; +} + +u16 +mxms_structlen(struct nouveau_mxm *mxm) +{ + return *(u16 *)&mxms_data(mxm)[6]; +} + +bool +mxms_checksum(struct nouveau_mxm *mxm) +{ + u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm); + u8 *mxms = mxms_data(mxm), sum = 0; + while (size--) + sum += *mxms++; + if (sum) { + nv_debug(mxm, "checksum invalid\n"); + return false; + } + return true; +} + +bool +mxms_valid(struct nouveau_mxm *mxm) +{ + u8 *mxms = mxms_data(mxm); + if (*(u32 *)mxms != 0x5f4d584d) { + nv_debug(mxm, "signature invalid\n"); + return false; + } + + if (!mxms_version(mxm) || !mxms_checksum(mxm)) + return false; + + return true; +} + +bool +mxms_foreach(struct nouveau_mxm *mxm, u8 types, + bool (*exec)(struct nouveau_mxm *, u8 *, void *), void *info) +{ + u8 *mxms = mxms_data(mxm); + u8 *desc = mxms + mxms_headerlen(mxm); + u8 *fini = desc + mxms_structlen(mxm) - 1; + while (desc < fini) { + u8 type = desc[0] & 0x0f; + u8 headerlen = 0; + u8 recordlen = 0; + u8 entries = 0; + + switch (type) { + case 0: /* Output Device Structure */ + if (mxms_version(mxm) >= 0x0300) + headerlen = 8; + else + headerlen = 6; + break; + case 1: /* System Cooling Capability Structure */ + case 2: /* Thermal Structure */ + case 3: /* Input Power Structure */ + headerlen = 4; + break; + case 4: /* GPIO Device Structure */ + headerlen = 4; + recordlen = 2; + entries = (ROM32(desc[0]) & 0x01f00000) >> 20; + break; + case 5: /* Vendor Specific Structure */ + headerlen = 8; + break; + case 6: /* Backlight Control Structure */ + if (mxms_version(mxm) >= 0x0300) { + headerlen = 4; + recordlen = 8; + entries = (desc[1] & 0xf0) >> 4; + } else { + headerlen = 8; + } + break; + case 7: /* Fan Control Structure */ + headerlen = 8; + recordlen = 4; + entries = desc[1] & 0x07; + break; + default: + nv_debug(mxm, "unknown descriptor type %d\n", type); + return false; + } + + if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) { + static const char * mxms_desc_name[] = { + "ODS", "SCCS", "TS", "IPS", + "GSD", "VSS", "BCS", "FCS", + }; + u8 *dump = desc; + int i, j; + + nv_debug(mxm, "%4s: ", mxms_desc_name[type]); + for (j = headerlen - 1; j >= 0; j--) + pr_cont("%02x", dump[j]); + pr_cont("\n"); + dump += headerlen; + + for (i = 0; i < entries; i++, dump += recordlen) { + nv_debug(mxm, " "); + for (j = recordlen - 1; j >= 0; j--) + pr_cont("%02x", dump[j]); + pr_cont("\n"); + } + } + + if (types & (1 << type)) { + if (!exec(mxm, desc, info)) + return false; + } + + desc += headerlen + (entries * recordlen); + } + + return true; +} + +void +mxms_output_device(struct nouveau_mxm *mxm, u8 *pdata, struct mxms_odev *desc) +{ + u64 data = ROM32(pdata[0]); + if (mxms_version(mxm) >= 0x0300) + data |= (u64)ROM16(pdata[4]) << 32; + + desc->outp_type = (data & 0x00000000000000f0ULL) >> 4; + desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8; + desc->conn_type = (data & 0x000000000001f000ULL) >> 12; + desc->dig_conn = (data & 
0x0000000000780000ULL) >> 19; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h new file mode 100644 index 0000000..5e0be0c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h @@ -0,0 +1,22 @@ +#ifndef __NVMXM_MXMS_H__ +#define __NVMXM_MXMS_H__ + +struct mxms_odev { + u8 outp_type; + u8 conn_type; + u8 ddc_port; + u8 dig_conn; +}; + +void mxms_output_device(struct nouveau_mxm *, u8 *, struct mxms_odev *); + +u16 mxms_version(struct nouveau_mxm *); +u16 mxms_headerlen(struct nouveau_mxm *); +u16 mxms_structlen(struct nouveau_mxm *); +bool mxms_checksum(struct nouveau_mxm *); +bool mxms_valid(struct nouveau_mxm *); + +bool mxms_foreach(struct nouveau_mxm *, u8, + bool (*)(struct nouveau_mxm *, u8 *, void *), void *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c new file mode 100644 index 0000000..af129c2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c @@ -0,0 +1,233 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/mxm.h> +#include <subdev/bios.h> +#include <subdev/bios/conn.h> +#include <subdev/bios/dcb.h> +#include <subdev/bios/mxm.h> + +#include "mxms.h" + +struct nv50_mxm_priv { + struct nouveau_mxm base; +}; + +struct context { + u32 *outp; + struct mxms_odev desc; +}; + +static bool +mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info) +{ + struct context *ctx = info; + struct mxms_odev desc; + + mxms_output_device(mxm, data, &desc); + if (desc.outp_type == 2 && + desc.dig_conn == ctx->desc.dig_conn) + return false; + return true; +} + +static bool +mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info) +{ + struct nouveau_bios *bios = nouveau_bios(mxm); + struct context *ctx = info; + u64 desc = *(u64 *)data; + + mxms_output_device(mxm, data, &ctx->desc); + + /* match dcb encoder type to mxm-ods device type */ + if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type) + return true; + + /* digital output, have some extra stuff to match here, there's a + * table in the vbios that provides a mapping from the mxm digital + * connection enum values to SOR/link + */ + if ((desc & 0x00000000000000f0) >= 0x20) { + /* check against sor index */ + u8 link = mxm_sor_map(bios, ctx->desc.dig_conn); + if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24) + return true; + + /* check dcb entry has a compatible link field */ + link = (link & 0x30) >> 4; + if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link) + return true; + } + + /* mark this descriptor accounted for by setting invalid device type, + * except of course some manufactures don't follow specs properly and + * we need to avoid killing off the TMDS function on DP connectors + * if MXM-SIS is missing an entry for it. + */ + data[0] &= ~0xf0; + if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 && + mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) { + data[0] |= 0x20; /* modify descriptor to match TMDS now */ + } else { + data[0] |= 0xf0; + } + + return false; +} + +static int +mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb) +{ + struct nouveau_mxm *mxm = nouveau_mxm(bios); + struct context ctx = { .outp = (u32 *)(bios->data + pdcb) }; + u8 type, i2cidx, link, ver, len; + u8 *conn; + + /* look for an output device structure that matches this dcb entry. + * if one isn't found, disable it. + */ + if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) { + nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n", + idx, ctx.outp[0], ctx.outp[1]); + ctx.outp[0] |= 0x0000000f; + return 0; + } + + /* modify the output's ddc/aux port, there's a pointer to a table + * with the mapping from mxm ddc/aux port to dcb i2c_index in the + * vbios mxm table + */ + i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port); + if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP) + i2cidx = (i2cidx & 0x0f) << 4; + else + i2cidx = (i2cidx & 0xf0); + + if (i2cidx != 0xf0) { + ctx.outp[0] &= ~0x000000f0; + ctx.outp[0] |= i2cidx; + } + + /* override dcb sorconf.link, based on what mxm data says */ + switch (ctx.desc.outp_type) { + case 0x00: /* Analog CRT */ + case 0x01: /* Analog TV/HDTV */ + break; + default: + link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30; + ctx.outp[1] &= ~0x00000030; + ctx.outp[1] |= link; + break; + } + + /* we may need to fixup various other vbios tables based on what + * the descriptor says the connector type should be. + * + * in a lot of cases, the vbios tables will claim DVI-I is possible, + * and the mxm data says the connector is really HDMI. 
another + * common example is DP->eDP. + */ + conn = bios->data; + conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len); + type = conn[0]; + switch (ctx.desc.conn_type) { + case 0x01: /* LVDS */ + ctx.outp[1] |= 0x00000004; /* use_power_scripts */ + /* XXX: modify default link width in LVDS table */ + break; + case 0x02: /* HDMI */ + type = DCB_CONNECTOR_HDMI_1; + break; + case 0x03: /* DVI-D */ + type = DCB_CONNECTOR_DVI_D; + break; + case 0x0e: /* eDP, falls through to DPint */ + ctx.outp[1] |= 0x00010000; + case 0x07: /* DP internal, wtf is this?? HP8670w */ + ctx.outp[1] |= 0x00000004; /* use_power_scripts? */ + type = DCB_CONNECTOR_eDP; + break; + default: + break; + } + + if (mxms_version(mxm) >= 0x0300) + conn[0] = type; + + return 0; +} + +static bool +mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info) +{ + u64 desc = *(u64 *)data; + if ((desc & 0xf0) != 0xf0) + nv_info(mxm, "unmatched output device 0x%016llx\n", desc); + return true; +} + +static void +mxm_dcb_sanitise(struct nouveau_mxm *mxm) +{ + struct nouveau_bios *bios = nouveau_bios(mxm); + u8 ver, hdr, cnt, len; + u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len); + if (dcb == 0x0000 || ver != 0x40) { + nv_debug(mxm, "unsupported DCB version\n"); + return; + } + + dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry); + mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL); +} + +static int +nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_mxm_priv *priv; + int ret; + + ret = nouveau_mxm_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + if (priv->base.action & MXM_SANITISE_DCB) + mxm_dcb_sanitise(&priv->base); + return 0; +} + +struct nouveau_oclass +nv50_mxm_oclass = { + .handle = NV_SUBDEV(MXM, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_mxm_ctor, + .dtor = _nouveau_mxm_dtor, + .init = _nouveau_mxm_init, + .fini = _nouveau_mxm_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c new file mode 100644 index 0000000..a00a5a7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c @@ -0,0 +1,336 @@ +/* + * Copyright 2012 The Nouveau community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres + */ + +#include <core/object.h> +#include <core/device.h> + +#include <subdev/bios.h> + +#include "priv.h" + +static int +nouveau_therm_update_trip(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_therm_trip_point *trip = priv->fan->bios.trip, + *cur_trip = NULL, + *last_trip = priv->last_trip; + u8 temp = therm->temp_get(therm); + u16 duty, i; + + /* look for the trip point corresponding to the current temperature */ + cur_trip = NULL; + for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) { + if (temp >= trip[i].temp) + cur_trip = &trip[i]; + } + + /* account for the hysteresis cycle */ + if (last_trip && temp <= (last_trip->temp) && + temp > (last_trip->temp - last_trip->hysteresis)) + cur_trip = last_trip; + + if (cur_trip) { + duty = cur_trip->fan_duty; + priv->last_trip = cur_trip; + } else { + duty = 0; + priv->last_trip = NULL; + } + + return duty; +} + +static int +nouveau_therm_update_linear(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + u8 linear_min_temp = priv->fan->bios.linear_min_temp; + u8 linear_max_temp = priv->fan->bios.linear_max_temp; + u8 temp = therm->temp_get(therm); + u16 duty; + + /* handle the non-linear part first */ + if (temp < linear_min_temp) + return priv->fan->bios.min_duty; + else if (temp > linear_max_temp) + return priv->fan->bios.max_duty; + + /* we are in the linear zone */ + duty = (temp - linear_min_temp); + duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty); + duty /= (linear_max_temp - linear_min_temp); + duty += priv->fan->bios.min_duty; + + return duty; +} + +static void +nouveau_therm_update(struct nouveau_therm *therm, int mode) +{ + struct nouveau_timer *ptimer = nouveau_timer(therm); + struct nouveau_therm_priv *priv = (void *)therm; + unsigned long flags; + int duty; + + spin_lock_irqsave(&priv->lock, flags); + if (mode < 0) + mode = priv->mode; + priv->mode = mode; + + switch (mode) { + case NOUVEAU_THERM_CTRL_MANUAL: + duty = nouveau_therm_fan_get(therm); + if (duty < 0) + duty = 100; + break; + case NOUVEAU_THERM_CTRL_AUTO: + if (priv->fan->bios.nr_fan_trip) + duty = nouveau_therm_update_trip(therm); + else + duty = nouveau_therm_update_linear(therm); + break; + case NOUVEAU_THERM_CTRL_NONE: + default: + goto done; + } + + nv_debug(therm, "FAN target request: %d%%\n", duty); + nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty); + +done: + if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO)) + ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void +nouveau_therm_alarm(struct nouveau_alarm *alarm) +{ + struct nouveau_therm_priv *priv = + container_of(alarm, struct nouveau_therm_priv, alarm); + nouveau_therm_update(&priv->base, -1); +} + +int +nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_device *device = nv_device(therm); + static const char *name[] = { + "disabled", + "manual", + "automatic" + }; + + /* The default PDAEMON ucode interferes with fan management */ + if ((mode >= ARRAY_SIZE(name)) || + (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) + return -EINVAL; + + /* do not allow automatic fan management if the thermal sensor is + * not available */ + if (priv->mode == 2 && therm->temp_get(therm) < 0) + return -EINVAL; + + if (priv->mode == mode) + return 0; + + nv_info(therm, "fan management: %s\n", 
name[mode]); + nouveau_therm_update(therm, mode); + return 0; +} + +int +nouveau_therm_attr_get(struct nouveau_therm *therm, + enum nouveau_therm_attr_type type) +{ + struct nouveau_therm_priv *priv = (void *)therm; + + switch (type) { + case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY: + return priv->fan->bios.min_duty; + case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY: + return priv->fan->bios.max_duty; + case NOUVEAU_THERM_ATTR_FAN_MODE: + return priv->mode; + case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: + return priv->bios_sensor.thrs_fan_boost.temp; + case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST: + return priv->bios_sensor.thrs_fan_boost.hysteresis; + case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK: + return priv->bios_sensor.thrs_down_clock.temp; + case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST: + return priv->bios_sensor.thrs_down_clock.hysteresis; + case NOUVEAU_THERM_ATTR_THRS_CRITICAL: + return priv->bios_sensor.thrs_critical.temp; + case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST: + return priv->bios_sensor.thrs_critical.hysteresis; + case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN: + return priv->bios_sensor.thrs_shutdown.temp; + case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST: + return priv->bios_sensor.thrs_shutdown.hysteresis; + } + + return -EINVAL; +} + +int +nouveau_therm_attr_set(struct nouveau_therm *therm, + enum nouveau_therm_attr_type type, int value) +{ + struct nouveau_therm_priv *priv = (void *)therm; + + switch (type) { + case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY: + if (value < 0) + value = 0; + if (value > priv->fan->bios.max_duty) + value = priv->fan->bios.max_duty; + priv->fan->bios.min_duty = value; + return 0; + case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY: + if (value < 0) + value = 0; + if (value < priv->fan->bios.min_duty) + value = priv->fan->bios.min_duty; + priv->fan->bios.max_duty = value; + return 0; + case NOUVEAU_THERM_ATTR_FAN_MODE: + return nouveau_therm_fan_mode(therm, value); + case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: + priv->bios_sensor.thrs_fan_boost.temp = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST: + priv->bios_sensor.thrs_fan_boost.hysteresis = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK: + priv->bios_sensor.thrs_down_clock.temp = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST: + priv->bios_sensor.thrs_down_clock.hysteresis = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_CRITICAL: + priv->bios_sensor.thrs_critical.temp = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST: + priv->bios_sensor.thrs_critical.hysteresis = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN: + priv->bios_sensor.thrs_shutdown.temp = value; + priv->sensor.program_alarms(therm); + return 0; + case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST: + priv->bios_sensor.thrs_shutdown.hysteresis = value; + priv->sensor.program_alarms(therm); + return 0; + } + + return -EINVAL; +} + +int +_nouveau_therm_init(struct nouveau_object *object) +{ + struct nouveau_therm *therm = (void *)object; + struct nouveau_therm_priv *priv = (void *)therm; + int ret; + + ret = nouveau_subdev_init(&therm->base); + if (ret) + return ret; + + if (priv->suspend >= 0) + nouveau_therm_fan_mode(therm, priv->mode); + priv->sensor.program_alarms(therm); + return 0; +} + +int +_nouveau_therm_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_therm *therm = (void 
*)object; + struct nouveau_therm_priv *priv = (void *)therm; + + if (suspend) { + priv->suspend = priv->mode; + priv->mode = NOUVEAU_THERM_CTRL_NONE; + } + + return nouveau_subdev_fini(&therm->base, suspend); +} + +int +nouveau_therm_create_(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, + int length, void **pobject) +{ + struct nouveau_therm_priv *priv; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM", + "therm", length, pobject); + priv = *pobject; + if (ret) + return ret; + + nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm); + spin_lock_init(&priv->lock); + spin_lock_init(&priv->sensor.alarm_program_lock); + + priv->base.fan_get = nouveau_therm_fan_user_get; + priv->base.fan_set = nouveau_therm_fan_user_set; + priv->base.fan_sense = nouveau_therm_fan_sense; + priv->base.attr_get = nouveau_therm_attr_get; + priv->base.attr_set = nouveau_therm_attr_set; + priv->mode = priv->suspend = -1; /* undefined */ + return 0; +} + +int +nouveau_therm_preinit(struct nouveau_therm *therm) +{ + nouveau_therm_sensor_ctor(therm); + nouveau_therm_ic_ctor(therm); + nouveau_therm_fan_ctor(therm); + + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); + nouveau_therm_sensor_preinit(therm); + return 0; +} + +void +_nouveau_therm_dtor(struct nouveau_object *object) +{ + struct nouveau_therm_priv *priv = (void *)object; + kfree(priv->fan); + nouveau_subdev_destroy(&priv->base.base); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c new file mode 100644 index 0000000..c728380 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c @@ -0,0 +1,254 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + * Martin Peres + */ + +#include "priv.h" + +#include <core/object.h> +#include <core/device.h> + +#include <subdev/gpio.h> +#include <subdev/timer.h> + +static int +nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target) +{ + struct nouveau_therm *therm = fan->parent; + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_timer *ptimer = nouveau_timer(priv); + unsigned long flags; + int ret = 0; + int duty; + + /* update target fan speed, restricting to allowed range */ + spin_lock_irqsave(&fan->lock, flags); + if (target < 0) + target = fan->percent; + target = max_t(u8, target, fan->bios.min_duty); + target = min_t(u8, target, fan->bios.max_duty); + if (fan->percent != target) { + nv_debug(therm, "FAN target: %d\n", target); + fan->percent = target; + } + + /* check that we're not already at the target duty cycle */ + duty = fan->get(therm); + if (duty == target) + goto done; + + /* smooth out the fanspeed increase/decrease */ + if (!immediate && duty >= 0) { + /* the constant "3" is a rough approximation taken from + * nvidia's behaviour. + * it is meant to bump the fan speed more incrementally + */ + if (duty < target) + duty = min(duty + 3, target); + else if (duty > target) + duty = max(duty - 3, target); + } else { + duty = target; + } + + nv_debug(therm, "FAN update: %d\n", duty); + ret = fan->set(therm, duty); + if (ret) + goto done; + + /* schedule next fan update, if not at target speed already */ + if (list_empty(&fan->alarm.head) && target != duty) { + u16 bump_period = fan->bios.bump_period; + u16 slow_down_period = fan->bios.slow_down_period; + u64 delay; + + if (duty > target) + delay = slow_down_period; + else if (duty == target) + delay = min(bump_period, slow_down_period) ; + else + delay = bump_period; + + ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm); + } + +done: + spin_unlock_irqrestore(&fan->lock, flags); + return ret; +} + +static void +nouveau_fan_alarm(struct nouveau_alarm *alarm) +{ + struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm); + nouveau_fan_update(fan, false, -1); +} + +int +nouveau_therm_fan_get(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + return priv->fan->get(therm); +} + +int +nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent) +{ + struct nouveau_therm_priv *priv = (void *)therm; + return nouveau_fan_update(priv->fan, immediate, percent); +} + +int +nouveau_therm_fan_sense(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_timer *ptimer = nouveau_timer(therm); + struct nouveau_gpio *gpio = nouveau_gpio(therm); + u32 cycles, cur, prev; + u64 start, end, tach; + + if (priv->fan->tach.func == DCB_GPIO_UNUSED) + return -ENODEV; + + /* Time a complete rotation and extrapolate to RPM: + * When the fan spins, it changes the value of GPIO FAN_SENSE. + * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation. 
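+ * e.g. a measured window (end - start) of 20,000,000 ns for one
+ * rotation works out to 60e9 / 20e6 = 3000 RPM (illustrative figures).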
+ */ + start = ptimer->read(ptimer); + prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line); + cycles = 0; + do { + usleep_range(500, 1000); /* supports 0 < rpm < 7500 */ + + cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line); + if (prev != cur) { + if (!start) + start = ptimer->read(ptimer); + cycles++; + prev = cur; + } + } while (cycles < 5 && ptimer->read(ptimer) - start < 250000000); + end = ptimer->read(ptimer); + + if (cycles == 5) { + tach = (u64)60000000000ULL; + do_div(tach, (end - start)); + return tach; + } else + return 0; +} + +int +nouveau_therm_fan_user_get(struct nouveau_therm *therm) +{ + return nouveau_therm_fan_get(therm); +} + +int +nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent) +{ + struct nouveau_therm_priv *priv = (void *)therm; + + if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL) + return -EINVAL; + + return nouveau_therm_fan_set(therm, true, percent); +} + +static void +nouveau_therm_fan_set_defaults(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + + priv->fan->bios.pwm_freq = 0; + priv->fan->bios.min_duty = 0; + priv->fan->bios.max_duty = 100; + priv->fan->bios.bump_period = 500; + priv->fan->bios.slow_down_period = 2000; + priv->fan->bios.linear_min_temp = 40; + priv->fan->bios.linear_max_temp = 85; +} + +static void +nouveau_therm_fan_safety_checks(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + + if (priv->fan->bios.min_duty > 100) + priv->fan->bios.min_duty = 100; + if (priv->fan->bios.max_duty > 100) + priv->fan->bios.max_duty = 100; + + if (priv->fan->bios.min_duty > priv->fan->bios.max_duty) + priv->fan->bios.min_duty = priv->fan->bios.max_duty; +} + +int +nouveau_therm_fan_ctor(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_gpio *gpio = nouveau_gpio(therm); + struct nouveau_bios *bios = nouveau_bios(therm); + struct dcb_gpio_func func; + int ret; + + /* attempt to locate a drivable fan, and determine control method */ + ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func); + if (ret == 0) { + if (func.log[0] & DCB_GPIO_LOG_DIR_IN) { + nv_debug(therm, "GPIO_FAN is in input mode\n"); + ret = -EINVAL; + } else { + ret = nouveau_fanpwm_create(therm, &func); + if (ret != 0) + ret = nouveau_fantog_create(therm, &func); + } + } + + /* no controllable fan found, create a dummy fan module */ + if (ret != 0) { + ret = nouveau_fannil_create(therm); + if (ret) + return ret; + } + + nv_info(therm, "FAN control: %s\n", priv->fan->type); + + /* attempt to detect a tachometer connection */ + ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach); + if (ret) + priv->fan->tach.func = DCB_GPIO_UNUSED; + + /* initialise fan bump/slow update handling */ + priv->fan->parent = therm; + nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm); + spin_lock_init(&priv->fan->lock); + + /* other random init... */ + nouveau_therm_fan_set_defaults(therm); + nvbios_perf_fan_parse(bios, &priv->fan->perf); + if (nvbios_therm_fan_parse(bios, &priv->fan->bios)) + nv_error(therm, "parsing the thermal table failed\n"); + nouveau_therm_fan_safety_checks(therm); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c new file mode 100644 index 0000000..b78c182 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c @@ -0,0 +1,54 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +static int +nouveau_fannil_get(struct nouveau_therm *therm) +{ + return -ENODEV; +} + +static int +nouveau_fannil_set(struct nouveau_therm *therm, int percent) +{ + return -ENODEV; +} + +int +nouveau_fannil_create(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fan *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + tpriv->fan = priv; + if (!priv) + return -ENOMEM; + + priv->type = "none / external"; + priv->get = nouveau_fannil_get; + priv->set = nouveau_fannil_set; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c new file mode 100644 index 0000000..5f71db8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c @@ -0,0 +1,107 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + * Martin Peres + */ + +#include <core/option.h> +#include <subdev/gpio.h> + +#include "priv.h" + +struct nouveau_fanpwm_priv { + struct nouveau_fan base; + struct dcb_gpio_func func; +}; + +static int +nouveau_fanpwm_get(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan; + struct nouveau_gpio *gpio = nouveau_gpio(therm); + int card_type = nv_device(therm)->card_type; + u32 divs, duty; + int ret; + + ret = therm->pwm_get(therm, priv->func.line, &divs, &duty); + if (ret == 0 && divs) { + divs = max(divs, duty); + if (card_type <= NV_40 || (priv->func.log[0] & 1)) + duty = divs - duty; + return (duty * 100) / divs; + } + + return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100; +} + +static int +nouveau_fanpwm_set(struct nouveau_therm *therm, int percent) +{ + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan; + int card_type = nv_device(therm)->card_type; + u32 divs, duty; + int ret; + + divs = priv->base.perf.pwm_divisor; + if (priv->base.bios.pwm_freq) { + divs = 1; + if (therm->pwm_clock) + divs = therm->pwm_clock(therm); + divs /= priv->base.bios.pwm_freq; + } + + duty = ((divs * percent) + 99) / 100; + if (card_type <= NV_40 || (priv->func.log[0] & 1)) + duty = divs - duty; + + ret = therm->pwm_set(therm, priv->func.line, divs, duty); + if (ret == 0) + ret = therm->pwm_ctrl(therm, priv->func.line, true); + return ret; +} + +int +nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func) +{ + struct nouveau_device *device = nv_device(therm); + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fanpwm_priv *priv; + u32 divs, duty; + + if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) || + !therm->pwm_ctrl || + therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV) + return -ENODEV; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + tpriv->fan = &priv->base; + if (!priv) + return -ENOMEM; + + priv->base.type = "PWM"; + priv->base.get = nouveau_fanpwm_get; + priv->base.set = nouveau_fanpwm_set; + priv->func = *func; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c new file mode 100644 index 0000000..e601773 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c @@ -0,0 +1,115 @@ +/* + * Copyright 2012 The Nouveau community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Martin Peres + */ + +#include "priv.h" + +#include <core/object.h> +#include <core/device.h> + +#include <subdev/gpio.h> +#include <subdev/timer.h> + +struct nouveau_fantog_priv { + struct nouveau_fan base; + struct nouveau_alarm alarm; + spinlock_t lock; + u32 period_us; + u32 percent; + struct dcb_gpio_func func; +}; + +static void +nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent) +{ + struct nouveau_therm_priv *tpriv = (void *)priv->base.parent; + struct nouveau_timer *ptimer = nouveau_timer(tpriv); + struct nouveau_gpio *gpio = nouveau_gpio(tpriv); + unsigned long flags; + int duty; + + spin_lock_irqsave(&priv->lock, flags); + if (percent < 0) + percent = priv->percent; + priv->percent = percent; + + duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff); + gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); + + if (list_empty(&priv->alarm.head) && percent != (duty * 100)) { + u64 next_change = (percent * priv->period_us) / 100; + if (!duty) + next_change = priv->period_us - next_change; + ptimer->alarm(ptimer, next_change * 1000, &priv->alarm); + } + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void +nouveau_fantog_alarm(struct nouveau_alarm *alarm) +{ + struct nouveau_fantog_priv *priv = + container_of(alarm, struct nouveau_fantog_priv, alarm); + nouveau_fantog_update(priv, -1); +} + +static int +nouveau_fantog_get(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fantog_priv *priv = (void *)tpriv->fan; + return priv->percent; +} + +static int +nouveau_fantog_set(struct nouveau_therm *therm, int percent) +{ + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fantog_priv *priv = (void *)tpriv->fan; + if (therm->pwm_ctrl) + therm->pwm_ctrl(therm, priv->func.line, false); + nouveau_fantog_update(priv, percent); + return 0; +} + +int +nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func) +{ + struct nouveau_therm_priv *tpriv = (void *)therm; + struct nouveau_fantog_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + tpriv->fan = &priv->base; + if (!priv) + return -ENOMEM; + + priv->base.type = "toggle"; + priv->base.get = nouveau_fantog_get; + priv->base.set = nouveau_fantog_set; + nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm); + priv->period_us = 100000; /* 10Hz */ + priv->percent = 100; + priv->func = *func; + spin_lock_init(&priv->lock); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c new file mode 100644 index 0000000..8b3adec --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c @@ -0,0 +1,120 @@ +/* + * Copyright 2012 Nouveau community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all 
copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Martin Peres + */ + +#include "priv.h" + +#include <subdev/i2c.h> +#include <subdev/bios/extdev.h> + +static bool +probe_monitoring_device(struct nouveau_i2c_port *i2c, + struct i2c_board_info *info) +{ + struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + struct i2c_client *client; + + request_module("%s%s", I2C_MODULE_PREFIX, info->type); + + client = i2c_new_device(&i2c->adapter, info); + if (!client) + return false; + + if (!client->driver || client->driver->detect(client, info)) { + i2c_unregister_device(client); + return false; + } + + nv_info(priv, + "Found an %s at address 0x%x (controlled by lm_sensors, " + "temp offset %+i C)\n", + info->type, info->addr, sensor->offset_constant); + priv->ic = client; + + return true; +} + +static struct i2c_board_info +nv_board_infos[] = { + { I2C_BOARD_INFO("w83l785ts", 0x2d) }, + { I2C_BOARD_INFO("w83781d", 0x2d) }, + { I2C_BOARD_INFO("adt7473", 0x2e) }, + { I2C_BOARD_INFO("adt7473", 0x2d) }, + { I2C_BOARD_INFO("adt7473", 0x2c) }, + { I2C_BOARD_INFO("f75375", 0x2e) }, + { I2C_BOARD_INFO("lm99", 0x4c) }, + { I2C_BOARD_INFO("lm90", 0x4c) }, + { I2C_BOARD_INFO("lm90", 0x4d) }, + { I2C_BOARD_INFO("adm1021", 0x18) }, + { I2C_BOARD_INFO("adm1021", 0x19) }, + { I2C_BOARD_INFO("adm1021", 0x1a) }, + { I2C_BOARD_INFO("adm1021", 0x29) }, + { I2C_BOARD_INFO("adm1021", 0x2a) }, + { I2C_BOARD_INFO("adm1021", 0x2b) }, + { I2C_BOARD_INFO("adm1021", 0x4c) }, + { I2C_BOARD_INFO("adm1021", 0x4d) }, + { I2C_BOARD_INFO("adm1021", 0x4e) }, + { I2C_BOARD_INFO("lm63", 0x18) }, + { I2C_BOARD_INFO("lm63", 0x4e) }, + { } +}; + +void +nouveau_therm_ic_ctor(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_bios *bios = nouveau_bios(therm); + struct nouveau_i2c *i2c = nouveau_i2c(therm); + struct nvbios_extdev_func extdev_entry; + + if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) { + struct i2c_board_info board[] = { + { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, + { } + }; + + i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", + board, probe_monitoring_device); + if (priv->ic) + return; + } + + if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) { + struct i2c_board_info board[] = { + { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, + { } + }; + + i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", + board, probe_monitoring_device); + if (priv->ic) + return; + } + + /* The vbios doesn't provide the address of an existing monitoring + device. Let's try our static list. 
+ */ + i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", + nv_board_infos, probe_monitoring_device); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c new file mode 100644 index 0000000..002e51b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c @@ -0,0 +1,224 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + * Martin Peres + */ + +#include "priv.h" + +struct nv40_therm_priv { + struct nouveau_therm_priv base; +}; + +enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; + +static enum nv40_sensor_style +nv40_sensor_style(struct nouveau_therm *therm) +{ + struct nouveau_device *device = nv_device(therm); + + switch (device->chipset) { + case 0x43: + case 0x44: + case 0x4a: + case 0x47: + return OLD_STYLE; + + case 0x46: + case 0x49: + case 0x4b: + case 0x4e: + case 0x4c: + case 0x67: + case 0x68: + case 0x63: + return NEW_STYLE; + default: + return INVALID_STYLE; + } +} + +static int +nv40_sensor_setup(struct nouveau_therm *therm) +{ + enum nv40_sensor_style style = nv40_sensor_style(therm); + + /* enable ADC readout and disable the ALARM threshold */ + if (style == NEW_STYLE) { + nv_mask(therm, 0x15b8, 0x80000000, 0); + nv_wr32(therm, 0x15b0, 0x80003fff); + mdelay(20); /* wait for the temperature to stabilize */ + return nv_rd32(therm, 0x15b4) & 0x3fff; + } else if (style == OLD_STYLE) { + nv_wr32(therm, 0x15b0, 0xff); + mdelay(20); /* wait for the temperature to stabilize */ + return nv_rd32(therm, 0x15b4) & 0xff; + } else + return -ENODEV; +} + +static int +nv40_temp_get(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + enum nv40_sensor_style style = nv40_sensor_style(therm); + int core_temp; + + if (style == NEW_STYLE) { + nv_wr32(therm, 0x15b0, 0x80003fff); + core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; + } else if (style == OLD_STYLE) { + nv_wr32(therm, 0x15b0, 0xff); + core_temp = nv_rd32(therm, 0x15b4) & 0xff; + } else + return -ENODEV; + + /* if the slope or the offset is unset, do not use the sensor */ + if (!sensor->slope_div || !sensor->slope_mult || + !sensor->offset_num || !sensor->offset_den) + return -ENODEV; + + core_temp = core_temp * sensor->slope_mult / sensor->slope_div; + core_temp = core_temp + sensor->offset_num / sensor->offset_den; + core_temp = core_temp + 
sensor->offset_constant - 8; + + /* reserve negative temperatures for errors */ + if (core_temp < 0) + core_temp = 0; + + return core_temp; +} + +static int +nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable) +{ + u32 mask = enable ? 0x80000000 : 0x0000000; + if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask); + else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask); + else { + nv_error(therm, "unknown pwm ctrl for gpio %d\n", line); + return -ENODEV; + } + return 0; +} + +static int +nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty) +{ + if (line == 2) { + u32 reg = nv_rd32(therm, 0x0010f0); + if (reg & 0x80000000) { + *duty = (reg & 0x7fff0000) >> 16; + *divs = (reg & 0x00007fff); + return 0; + } + } else + if (line == 9) { + u32 reg = nv_rd32(therm, 0x0015f4); + if (reg & 0x80000000) { + *divs = nv_rd32(therm, 0x0015f8); + *duty = (reg & 0x7fffffff); + return 0; + } + } else { + nv_error(therm, "unknown pwm ctrl for gpio %d\n", line); + return -ENODEV; + } + + return -EINVAL; +} + +static int +nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty) +{ + if (line == 2) { + nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs); + } else + if (line == 9) { + nv_wr32(therm, 0x0015f8, divs); + nv_mask(therm, 0x0015f4, 0x7fffffff, duty); + } else { + nv_error(therm, "unknown pwm ctrl for gpio %d\n", line); + return -ENODEV; + } + + return 0; +} + +void +nv40_therm_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_therm *therm = nouveau_therm(subdev); + uint32_t stat = nv_rd32(therm, 0x1100); + + /* processing */ + + /* ack all IRQs */ + nv_wr32(therm, 0x1100, 0x70000); + + nv_error(therm, "THERM received an IRQ: stat = %x\n", stat); +} + +static int +nv40_therm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv40_therm_priv *priv; + int ret; + + ret = nouveau_therm_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl; + priv->base.base.pwm_get = nv40_fan_pwm_get; + priv->base.base.pwm_set = nv40_fan_pwm_set; + priv->base.base.temp_get = nv40_temp_get; + priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling; + nv_subdev(priv)->intr = nv40_therm_intr; + return nouveau_therm_preinit(&priv->base.base); +} + +static int +nv40_therm_init(struct nouveau_object *object) +{ + struct nouveau_therm *therm = (void *)object; + + nv40_sensor_setup(therm); + + return _nouveau_therm_init(object); +} + +struct nouveau_oclass +nv40_therm_oclass = { + .handle = NV_SUBDEV(THERM, 0x40), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv40_therm_ctor, + .dtor = _nouveau_therm_dtor, + .init = nv40_therm_init, + .fini = _nouveau_therm_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c new file mode 100644 index 0000000..8cf7597 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c @@ -0,0 +1,197 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + * Martin Peres + */ + +#include "priv.h" + +struct nv50_therm_priv { + struct nouveau_therm_priv base; +}; + +static int +pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx) +{ + if (*line == 0x04) { + *ctrl = 0x00e100; + *line = 4; + *indx = 0; + } else + if (*line == 0x09) { + *ctrl = 0x00e100; + *line = 9; + *indx = 1; + } else + if (*line == 0x10) { + *ctrl = 0x00e28c; + *line = 0; + *indx = 0; + } else { + nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line); + return -ENODEV; + } + + return 0; +} + +int +nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable) +{ + u32 data = enable ? 0x00000001 : 0x00000000; + int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); + if (ret == 0) + nv_mask(therm, ctrl, 0x00010001 << line, data << line); + return ret; +} + +int +nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty) +{ + int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); + if (ret) + return ret; + + if (nv_rd32(therm, ctrl) & (1 << line)) { + *divs = nv_rd32(therm, 0x00e114 + (id * 8)); + *duty = nv_rd32(therm, 0x00e118 + (id * 8)); + return 0; + } + + return -EINVAL; +} + +int +nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty) +{ + int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); + if (ret) + return ret; + + nv_wr32(therm, 0x00e114 + (id * 8), divs); + nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000); + return 0; +} + +int +nv50_fan_pwm_clock(struct nouveau_therm *therm) +{ + int chipset = nv_device(therm)->chipset; + int crystal = nv_device(therm)->crystal; + int pwm_clock; + + /* determine the PWM source clock */ + if (chipset > 0x50 && chipset < 0x94) { + u8 pwm_div = nv_rd32(therm, 0x410c); + if (nv_rd32(therm, 0xc040) & 0x800000) { + /* Use the HOST clock (100 MHz) + * Where does this constant(2.4) come from? */ + pwm_clock = (100000000 >> pwm_div) * 10 / 24; + } else { + /* Where does this constant(20) come from? 
*/ + pwm_clock = (crystal * 1000) >> pwm_div; + pwm_clock /= 20; + } + } else { + pwm_clock = (crystal * 1000) / 20; + } + + return pwm_clock; +} + +static void +nv50_sensor_setup(struct nouveau_therm *therm) +{ + nv_mask(therm, 0x20010, 0x40000000, 0x0); + mdelay(20); /* wait for the temperature to stabilize */ +} + +static int +nv50_temp_get(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + int core_temp; + + core_temp = nv_rd32(therm, 0x20014) & 0x3fff; + + /* if the slope or the offset is unset, do not use the sensor */ + if (!sensor->slope_div || !sensor->slope_mult || + !sensor->offset_num || !sensor->offset_den) + return -ENODEV; + + core_temp = core_temp * sensor->slope_mult / sensor->slope_div; + core_temp = core_temp + sensor->offset_num / sensor->offset_den; + core_temp = core_temp + sensor->offset_constant - 8; + + /* reserve negative temperatures for errors */ + if (core_temp < 0) + core_temp = 0; + + return core_temp; +} + +static int +nv50_therm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_therm_priv *priv; + int ret; + + ret = nouveau_therm_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl; + priv->base.base.pwm_get = nv50_fan_pwm_get; + priv->base.base.pwm_set = nv50_fan_pwm_set; + priv->base.base.pwm_clock = nv50_fan_pwm_clock; + priv->base.base.temp_get = nv50_temp_get; + priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling; + nv_subdev(priv)->intr = nv40_therm_intr; + + return nouveau_therm_preinit(&priv->base.base); +} + +static int +nv50_therm_init(struct nouveau_object *object) +{ + struct nouveau_therm *therm = (void *)object; + + nv50_sensor_setup(therm); + + return _nouveau_therm_init(object); +} + +struct nouveau_oclass +nv50_therm_oclass = { + .handle = NV_SUBDEV(THERM, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_therm_ctor, + .dtor = _nouveau_therm_dtor, + .init = nv50_therm_init, + .fini = _nouveau_therm_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c new file mode 100644 index 0000000..42ba633 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c @@ -0,0 +1,221 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + * Martin Peres + */ + +#include "priv.h" + +struct nv84_therm_priv { + struct nouveau_therm_priv base; +}; + +int +nv84_temp_get(struct nouveau_therm *therm) +{ + return nv_rd32(therm, 0x20400); +} + +static void +nv84_therm_program_alarms(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + unsigned long flags; + + spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); + + /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */ + nv_wr32(therm, 0x20000, 0x000003ff); + + /* shutdown: The computer should be shutdown when reached */ + nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis); + nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp); + + /* THRS_1 : fan boost*/ + nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp); + + /* THRS_2 : critical */ + nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp); + + /* THRS_4 : down clock */ + nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp); + spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); + + nv_debug(therm, + "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n", + sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis, + sensor->thrs_down_clock.temp, + sensor->thrs_down_clock.hysteresis, + sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis, + sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis); + +} + +/* must be called with alarm_program_lock taken ! 
*/ +static void +nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm, + uint32_t thrs_reg, u8 status_bit, + const struct nvbios_therm_threshold *thrs, + enum nouveau_therm_thrs thrs_name) +{ + enum nouveau_therm_thrs_direction direction; + enum nouveau_therm_thrs_state prev_state, new_state; + int temp, cur; + + prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name); + temp = nv_rd32(therm, thrs_reg); + + /* program the next threshold */ + if (temp == thrs->temp) { + nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis); + new_state = NOUVEAU_THERM_THRS_HIGHER; + } else { + nv_wr32(therm, thrs_reg, thrs->temp); + new_state = NOUVEAU_THERM_THRS_LOWER; + } + + /* fix the state (in case someone reprogrammed the alarms) */ + cur = therm->temp_get(therm); + if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp) + new_state = NOUVEAU_THERM_THRS_HIGHER; + else if (new_state == NOUVEAU_THERM_THRS_HIGHER && + cur < thrs->temp - thrs->hysteresis) + new_state = NOUVEAU_THERM_THRS_LOWER; + nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state); + + /* find the direction */ + if (prev_state < new_state) + direction = NOUVEAU_THERM_THRS_RISING; + else if (prev_state > new_state) + direction = NOUVEAU_THERM_THRS_FALLING; + else + return; + + /* advertise a change in direction */ + nouveau_therm_sensor_event(therm, thrs_name, direction); +} + +static void +nv84_therm_intr(struct nouveau_subdev *subdev) +{ + struct nouveau_therm *therm = nouveau_therm(subdev); + struct nouveau_therm_priv *priv = (void *)therm; + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + unsigned long flags; + uint32_t intr; + + spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); + + intr = nv_rd32(therm, 0x20100); + + /* THRS_4: downclock */ + if (intr & 0x002) { + nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24, + &sensor->thrs_down_clock, + NOUVEAU_THERM_THRS_DOWNCLOCK); + intr &= ~0x002; + } + + /* shutdown */ + if (intr & 0x004) { + nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20, + &sensor->thrs_shutdown, + NOUVEAU_THERM_THRS_SHUTDOWN); + intr &= ~0x004; + } + + /* THRS_1 : fan boost */ + if (intr & 0x008) { + nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21, + &sensor->thrs_fan_boost, + NOUVEAU_THERM_THRS_FANBOOST); + intr &= ~0x008; + } + + /* THRS_2 : critical */ + if (intr & 0x010) { + nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22, + &sensor->thrs_critical, + NOUVEAU_THERM_THRS_CRITICAL); + intr &= ~0x010; + } + + if (intr) + nv_error(therm, "unhandled intr 0x%08x\n", intr); + + /* ACK everything */ + nv_wr32(therm, 0x20100, 0xffffffff); + nv_wr32(therm, 0x1100, 0x10000); /* PBUS */ + + spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); +} + +static int +nv84_therm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv84_therm_priv *priv; + int ret; + + ret = nouveau_therm_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl; + priv->base.base.pwm_get = nv50_fan_pwm_get; + priv->base.base.pwm_set = nv50_fan_pwm_set; + priv->base.base.pwm_clock = nv50_fan_pwm_clock; + priv->base.base.temp_get = nv84_temp_get; + priv->base.sensor.program_alarms = nv84_therm_program_alarms; + nv_subdev(priv)->intr = nv84_therm_intr; + + /* init the thresholds */ + nouveau_therm_sensor_set_threshold_state(&priv->base.base, + 
NOUVEAU_THERM_THRS_SHUTDOWN, + NOUVEAU_THERM_THRS_LOWER); + nouveau_therm_sensor_set_threshold_state(&priv->base.base, + NOUVEAU_THERM_THRS_FANBOOST, + NOUVEAU_THERM_THRS_LOWER); + nouveau_therm_sensor_set_threshold_state(&priv->base.base, + NOUVEAU_THERM_THRS_CRITICAL, + NOUVEAU_THERM_THRS_LOWER); + nouveau_therm_sensor_set_threshold_state(&priv->base.base, + NOUVEAU_THERM_THRS_DOWNCLOCK, + NOUVEAU_THERM_THRS_LOWER); + + return nouveau_therm_preinit(&priv->base.base); +} + +struct nouveau_oclass +nv84_therm_oclass = { + .handle = NV_SUBDEV(THERM, 0x84), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv84_therm_ctor, + .dtor = _nouveau_therm_dtor, + .init = _nouveau_therm_init, + .fini = _nouveau_therm_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c new file mode 100644 index 0000000..d11a7c4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c @@ -0,0 +1,99 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <subdev/gpio.h> + +#include "priv.h" + +struct nva3_therm_priv { + struct nouveau_therm_priv base; +}; + +int +nva3_therm_fan_sense(struct nouveau_therm *therm) +{ + u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff; + u32 ctrl = nv_rd32(therm, 0x00e720); + if (ctrl & 0x00000001) + return tach * 60; + return -ENODEV; +} + +static int +nva3_therm_init(struct nouveau_object *object) +{ + struct nva3_therm_priv *priv = (void *)object; + struct dcb_gpio_func *tach = &priv->base.fan->tach; + int ret; + + ret = nouveau_therm_init(&priv->base.base); + if (ret) + return ret; + + /* enable fan tach, count revolutions per-second */ + nv_mask(priv, 0x00e720, 0x00000003, 0x00000002); + if (tach->func != DCB_GPIO_UNUSED) { + nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000); + nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16); + nv_mask(priv, 0x00e720, 0x00000001, 0x00000001); + } + nv_mask(priv, 0x00e720, 0x00000002, 0x00000000); + + return 0; +} + +static int +nva3_therm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nva3_therm_priv *priv; + int ret; + + ret = nouveau_therm_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl; + priv->base.base.pwm_get = nv50_fan_pwm_get; + priv->base.base.pwm_set = nv50_fan_pwm_set; + priv->base.base.pwm_clock = nv50_fan_pwm_clock; + priv->base.base.temp_get = nv84_temp_get; + priv->base.base.fan_sense = nva3_therm_fan_sense; + priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling; + return nouveau_therm_preinit(&priv->base.base); +} + +struct nouveau_oclass +nva3_therm_oclass = { + .handle = NV_SUBDEV(THERM, 0xa3), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nva3_therm_ctor, + .dtor = _nouveau_therm_dtor, + .init = nva3_therm_init, + .fini = _nouveau_therm_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c new file mode 100644 index 0000000..54c28bd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c @@ -0,0 +1,153 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +struct nvd0_therm_priv { + struct nouveau_therm_priv base; +}; + +static int +pwm_info(struct nouveau_therm *therm, int line) +{ + u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04)); + switch (gpio & 0x000000c0) { + case 0x00000000: /* normal mode, possibly pwm forced off by us */ + case 0x00000040: /* nvio special */ + switch (gpio & 0x0000001f) { + case 0x19: return 1; + case 0x1c: return 0; + default: + break; + } + default: + break; + } + + nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio); + return -ENODEV; +} + +static int +nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable) +{ + u32 data = enable ? 0x00000040 : 0x00000000; + int indx = pwm_info(therm, line); + if (indx < 0) + return indx; + + nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data); + return 0; +} + +static int +nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty) +{ + int indx = pwm_info(therm, line); + if (indx < 0) + return indx; + + if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) { + *divs = nv_rd32(therm, 0x00e114 + (indx * 8)); + *duty = nv_rd32(therm, 0x00e118 + (indx * 8)); + return 0; + } + + return -EINVAL; +} + +static int +nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty) +{ + int indx = pwm_info(therm, line); + if (indx < 0) + return indx; + + nv_wr32(therm, 0x00e114 + (indx * 8), divs); + nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000); + return 0; +} + +static int +nvd0_fan_pwm_clock(struct nouveau_therm *therm) +{ + return (nv_device(therm)->crystal * 1000) / 20; +} + +static int +nvd0_therm_init(struct nouveau_object *object) +{ + struct nvd0_therm_priv *priv = (void *)object; + int ret; + + ret = nouveau_therm_init(&priv->base.base); + if (ret) + return ret; + + /* enable fan tach, count revolutions per-second */ + nv_mask(priv, 0x00e720, 0x00000003, 0x00000002); + if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) { + nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line); + nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000); + nv_mask(priv, 0x00e720, 0x00000001, 0x00000001); + } + nv_mask(priv, 0x00e720, 0x00000002, 0x00000000); + + return 0; +} + +static int +nvd0_therm_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvd0_therm_priv *priv; + int ret; + + ret = nouveau_therm_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl; + priv->base.base.pwm_get = nvd0_fan_pwm_get; + priv->base.base.pwm_set = nvd0_fan_pwm_set; + priv->base.base.pwm_clock = nvd0_fan_pwm_clock; + priv->base.base.temp_get = nv84_temp_get; + priv->base.base.fan_sense = nva3_therm_fan_sense; + priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling; + return nouveau_therm_preinit(&priv->base.base); +} + +struct nouveau_oclass +nvd0_therm_oclass = { + .handle = NV_SUBDEV(THERM, 0xd0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvd0_therm_ctor, + .dtor = _nouveau_therm_dtor, + .init = nvd0_therm_init, + .fini = _nouveau_therm_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h new file mode 100644 index 0000000..15ca64e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h @@ -0,0 +1,150 @@ +#ifndef __NVTHERM_PRIV_H__ +#define 
__NVTHERM_PRIV_H__ + +/* + * Copyright 2012 The Nouveau community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Martin Peres + */ + +#include <subdev/therm.h> + +#include <subdev/bios/extdev.h> +#include <subdev/bios/gpio.h> +#include <subdev/bios/perf.h> +#include <subdev/bios/therm.h> +#include <subdev/timer.h> + +struct nouveau_fan { + struct nouveau_therm *parent; + const char *type; + + struct nvbios_therm_fan bios; + struct nvbios_perf_fan perf; + + struct nouveau_alarm alarm; + spinlock_t lock; + int percent; + + int (*get)(struct nouveau_therm *therm); + int (*set)(struct nouveau_therm *therm, int percent); + + struct dcb_gpio_func tach; +}; + +enum nouveau_therm_thrs_direction { + NOUVEAU_THERM_THRS_FALLING = 0, + NOUVEAU_THERM_THRS_RISING = 1 +}; + +enum nouveau_therm_thrs_state { + NOUVEAU_THERM_THRS_LOWER = 0, + NOUVEAU_THERM_THRS_HIGHER = 1 +}; + +enum nouveau_therm_thrs { + NOUVEAU_THERM_THRS_FANBOOST = 0, + NOUVEAU_THERM_THRS_DOWNCLOCK = 1, + NOUVEAU_THERM_THRS_CRITICAL = 2, + NOUVEAU_THERM_THRS_SHUTDOWN = 3, + NOUVEAU_THERM_THRS_NR +}; + +struct nouveau_therm_priv { + struct nouveau_therm base; + + /* automatic thermal management */ + struct nouveau_alarm alarm; + spinlock_t lock; + struct nouveau_therm_trip_point *last_trip; + int mode; + int suspend; + + /* bios */ + struct nvbios_therm_sensor bios_sensor; + + /* fan priv */ + struct nouveau_fan *fan; + + /* alarms priv */ + struct { + spinlock_t alarm_program_lock; + struct nouveau_alarm therm_poll_alarm; + enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR]; + void (*program_alarms)(struct nouveau_therm *); + } sensor; + + /* what should be done if the card overheats */ + struct { + void (*downclock)(struct nouveau_therm *, bool active); + void (*pause)(struct nouveau_therm *, bool active); + } emergency; + + /* ic */ + struct i2c_client *ic; +}; + +int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode); +int nouveau_therm_attr_get(struct nouveau_therm *therm, + enum nouveau_therm_attr_type type); +int nouveau_therm_attr_set(struct nouveau_therm *therm, + enum nouveau_therm_attr_type type, int value); + +void nouveau_therm_ic_ctor(struct nouveau_therm *therm); + +int nouveau_therm_sensor_ctor(struct nouveau_therm *therm); + +int nouveau_therm_fan_ctor(struct nouveau_therm *therm); +int nouveau_therm_fan_get(struct nouveau_therm *therm); +int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent); +int 
nouveau_therm_fan_user_get(struct nouveau_therm *therm); +int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent); + +int nouveau_therm_fan_sense(struct nouveau_therm *therm); + +int nouveau_therm_preinit(struct nouveau_therm *); + +void nouveau_therm_sensor_preinit(struct nouveau_therm *); +void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, + enum nouveau_therm_thrs thrs, + enum nouveau_therm_thrs_state st); +enum nouveau_therm_thrs_state +nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm, + enum nouveau_therm_thrs thrs); +void nouveau_therm_sensor_event(struct nouveau_therm *therm, + enum nouveau_therm_thrs thrs, + enum nouveau_therm_thrs_direction dir); +void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm); + +void nv40_therm_intr(struct nouveau_subdev *); +int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool); +int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *); +int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32); +int nv50_fan_pwm_clock(struct nouveau_therm *); +int nv84_temp_get(struct nouveau_therm *therm); + +int nva3_therm_fan_sense(struct nouveau_therm *); + +int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *); +int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *); +int nouveau_fannil_create(struct nouveau_therm *); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c new file mode 100644 index 0000000..dde746c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -0,0 +1,245 @@ +/* + * Copyright 2012 The Nouveau community + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Martin Peres + */ + +#include "priv.h" + +#include <core/object.h> +#include <core/device.h> + +#include <subdev/bios.h> + +static void +nouveau_therm_temp_set_defaults(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + + priv->bios_sensor.offset_constant = 0; + + priv->bios_sensor.thrs_fan_boost.temp = 90; + priv->bios_sensor.thrs_fan_boost.hysteresis = 3; + + priv->bios_sensor.thrs_down_clock.temp = 95; + priv->bios_sensor.thrs_down_clock.hysteresis = 3; + + priv->bios_sensor.thrs_critical.temp = 105; + priv->bios_sensor.thrs_critical.hysteresis = 5; + + priv->bios_sensor.thrs_shutdown.temp = 135; + priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */ +} + + +static void +nouveau_therm_temp_safety_checks(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nvbios_therm_sensor *s = &priv->bios_sensor; + + /* enforce a minimum hysteresis on thresholds */ + s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); + s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); + s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2); + s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2); +} + +/* must be called with alarm_program_lock taken ! */ +void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, + enum nouveau_therm_thrs thrs, + enum nouveau_therm_thrs_state st) +{ + struct nouveau_therm_priv *priv = (void *)therm; + priv->sensor.alarm_state[thrs] = st; +} + +/* must be called with alarm_program_lock taken ! */ +enum nouveau_therm_thrs_state +nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm, + enum nouveau_therm_thrs thrs) +{ + struct nouveau_therm_priv *priv = (void *)therm; + return priv->sensor.alarm_state[thrs]; +} + +static void +nv_poweroff_work(struct work_struct *work) +{ + orderly_poweroff(true); + kfree(work); +} + +void nouveau_therm_sensor_event(struct nouveau_therm *therm, + enum nouveau_therm_thrs thrs, + enum nouveau_therm_thrs_direction dir) +{ + struct nouveau_therm_priv *priv = (void *)therm; + bool active; + const char *thresolds[] = { + "fanboost", "downclock", "critical", "shutdown" + }; + int temperature = therm->temp_get(therm); + + if (thrs < 0 || thrs > 3) + return; + + if (dir == NOUVEAU_THERM_THRS_FALLING) + nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", + temperature, thresolds[thrs]); + else + nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", + temperature, thresolds[thrs]); + + active = (dir == NOUVEAU_THERM_THRS_RISING); + switch (thrs) { + case NOUVEAU_THERM_THRS_FANBOOST: + if (active) { + nouveau_therm_fan_set(therm, true, 100); + nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO); + } + break; + case NOUVEAU_THERM_THRS_DOWNCLOCK: + if (priv->emergency.downclock) + priv->emergency.downclock(therm, active); + break; + case NOUVEAU_THERM_THRS_CRITICAL: + if (priv->emergency.pause) + priv->emergency.pause(therm, active); + break; + case NOUVEAU_THERM_THRS_SHUTDOWN: + if (active) { + struct work_struct *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (work) { + INIT_WORK(work, nv_poweroff_work); + schedule_work(work); + } + } + break; + case NOUVEAU_THERM_THRS_NR: + break; + } + +} + +/* must be called with alarm_program_lock taken ! 
*/ +static void +nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm, + const struct nvbios_therm_threshold *thrs, + enum nouveau_therm_thrs thrs_name) +{ + enum nouveau_therm_thrs_direction direction; + enum nouveau_therm_thrs_state prev_state, new_state; + int temp = therm->temp_get(therm); + + prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name); + + if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) { + direction = NOUVEAU_THERM_THRS_RISING; + new_state = NOUVEAU_THERM_THRS_HIGHER; + } else if (temp <= thrs->temp - thrs->hysteresis && + prev_state == NOUVEAU_THERM_THRS_HIGHER) { + direction = NOUVEAU_THERM_THRS_FALLING; + new_state = NOUVEAU_THERM_THRS_LOWER; + } else + return; /* nothing to do */ + + nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state); + nouveau_therm_sensor_event(therm, thrs_name, direction); +} + +static void +alarm_timer_callback(struct nouveau_alarm *alarm) +{ + struct nouveau_therm_priv *priv = + container_of(alarm, struct nouveau_therm_priv, sensor.therm_poll_alarm); + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + struct nouveau_timer *ptimer = nouveau_timer(priv); + struct nouveau_therm *therm = &priv->base; + unsigned long flags; + + spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags); + + nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost, + NOUVEAU_THERM_THRS_FANBOOST); + + nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock, + NOUVEAU_THERM_THRS_DOWNCLOCK); + + nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical, + NOUVEAU_THERM_THRS_CRITICAL); + + nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, + NOUVEAU_THERM_THRS_SHUTDOWN); + + /* schedule the next poll in one second */ + if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) + ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); + + spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); +} + +void +nouveau_therm_program_alarms_polling(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nvbios_therm_sensor *sensor = &priv->bios_sensor; + + nv_debug(therm, + "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n", + sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis, + sensor->thrs_down_clock.temp, + sensor->thrs_down_clock.hysteresis, + sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis, + sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis); + + alarm_timer_callback(&priv->sensor.therm_poll_alarm); +} + +void +nouveau_therm_sensor_preinit(struct nouveau_therm *therm) +{ + const char *sensor_avail = "yes"; + + if (therm->temp_get(therm) < 0) + sensor_avail = "no"; + + nv_info(therm, "internal sensor: %s\n", sensor_avail); +} + +int +nouveau_therm_sensor_ctor(struct nouveau_therm *therm) +{ + struct nouveau_therm_priv *priv = (void *)therm; + struct nouveau_bios *bios = nouveau_bios(therm); + + nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback); + + nouveau_therm_temp_set_defaults(therm); + if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE, + &priv->bios_sensor)) + nv_error(therm, "nvbios_therm_sensor_parse failed\n"); + nouveau_therm_temp_safety_checks(therm); + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c new file mode 100644 index 0000000..5d417cc --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c @@ -0,0 +1,87 @@ +/* + * Copyright 
2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "subdev/timer.h" + +bool +nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data) +{ + struct nouveau_timer *ptimer = nouveau_timer(obj); + u64 time0; + + time0 = ptimer->read(ptimer); + do { + if (nv_iclass(obj, NV_SUBDEV_CLASS)) { + if ((nv_rd32(obj, addr) & mask) == data) + return true; + } else { + if ((nv_ro32(obj, addr) & mask) == data) + return true; + } + } while (ptimer->read(ptimer) - time0 < nsec); + + return false; +} + +bool +nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data) +{ + struct nouveau_timer *ptimer = nouveau_timer(obj); + u64 time0; + + time0 = ptimer->read(ptimer); + do { + if (nv_iclass(obj, NV_SUBDEV_CLASS)) { + if ((nv_rd32(obj, addr) & mask) != data) + return true; + } else { + if ((nv_ro32(obj, addr) & mask) != data) + return true; + } + } while (ptimer->read(ptimer) - time0 < nsec); + + return false; +} + +bool +nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data) +{ + struct nouveau_timer *ptimer = nouveau_timer(obj); + u64 time0; + + time0 = ptimer->read(ptimer); + do { + if (func(data) == true) + return true; + } while (ptimer->read(ptimer) - time0 < nsec); + + return false; +} + +void +nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm) +{ + struct nouveau_timer *ptimer = nouveau_timer(obj); + ptimer->alarm(ptimer, nsec, alarm); +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c new file mode 100644 index 0000000..9469b82 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c @@ -0,0 +1,254 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <subdev/timer.h> + +#define NV04_PTIMER_INTR_0 0x009100 +#define NV04_PTIMER_INTR_EN_0 0x009140 +#define NV04_PTIMER_NUMERATOR 0x009200 +#define NV04_PTIMER_DENOMINATOR 0x009210 +#define NV04_PTIMER_TIME_0 0x009400 +#define NV04_PTIMER_TIME_1 0x009410 +#define NV04_PTIMER_ALARM_0 0x009420 + +struct nv04_timer_priv { + struct nouveau_timer base; + struct list_head alarms; + spinlock_t lock; +}; + +static u64 +nv04_timer_read(struct nouveau_timer *ptimer) +{ + struct nv04_timer_priv *priv = (void *)ptimer; + u32 hi, lo; + + do { + hi = nv_rd32(priv, NV04_PTIMER_TIME_1); + lo = nv_rd32(priv, NV04_PTIMER_TIME_0); + } while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1)); + + return ((u64)hi << 32 | lo); +} + +static void +nv04_timer_alarm_trigger(struct nouveau_timer *ptimer) +{ + struct nv04_timer_priv *priv = (void *)ptimer; + struct nouveau_alarm *alarm, *atemp; + unsigned long flags; + LIST_HEAD(exec); + + /* move any due alarms off the pending list */ + spin_lock_irqsave(&priv->lock, flags); + list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) { + if (alarm->timestamp <= ptimer->read(ptimer)) + list_move_tail(&alarm->head, &exec); + } + + /* reschedule interrupt for next alarm time */ + if (!list_empty(&priv->alarms)) { + alarm = list_first_entry(&priv->alarms, typeof(*alarm), head); + nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp); + nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001); + } else { + nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); + } + spin_unlock_irqrestore(&priv->lock, flags); + + /* execute any pending alarm handlers */ + list_for_each_entry_safe(alarm, atemp, &exec, head) { + list_del_init(&alarm->head); + alarm->func(alarm); + } +} + +static void +nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time, + struct nouveau_alarm *alarm) +{ + struct nv04_timer_priv *priv = (void *)ptimer; + struct nouveau_alarm *list; + unsigned long flags; + + alarm->timestamp = ptimer->read(ptimer) + time; + + /* append new alarm to list, in soonest-alarm-first order */ + spin_lock_irqsave(&priv->lock, flags); + if (!time) { + if (!list_empty(&alarm->head)) + list_del(&alarm->head); + } else { + list_for_each_entry(list, &priv->alarms, head) { + if (list->timestamp > alarm->timestamp) + break; + } + list_add_tail(&alarm->head, &list->head); + } + spin_unlock_irqrestore(&priv->lock, flags); + + /* process pending alarms */ + nv04_timer_alarm_trigger(ptimer); +} + +static void +nv04_timer_intr(struct nouveau_subdev *subdev) +{ + struct nv04_timer_priv *priv = (void *)subdev; + u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0); + + if (stat & 0x00000001) { + nv04_timer_alarm_trigger(&priv->base); + nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001); + stat &= ~0x00000001; + } + + if (stat) { + nv_error(priv, "unknown stat 0x%08x\n", stat); + nv_wr32(priv, NV04_PTIMER_INTR_0, stat); + } +} + +static int +nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object 
**pobject) +{ + struct nv04_timer_priv *priv; + int ret; + + ret = nouveau_timer_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.base.intr = nv04_timer_intr; + priv->base.read = nv04_timer_read; + priv->base.alarm = nv04_timer_alarm; + + INIT_LIST_HEAD(&priv->alarms); + spin_lock_init(&priv->lock); + return 0; +} + +static void +nv04_timer_dtor(struct nouveau_object *object) +{ + struct nv04_timer_priv *priv = (void *)object; + return nouveau_timer_destroy(&priv->base); +} + +static int +nv04_timer_init(struct nouveau_object *object) +{ + struct nouveau_device *device = nv_device(object); + struct nv04_timer_priv *priv = (void *)object; + u32 m = 1, f, n, d; + int ret; + + ret = nouveau_timer_init(&priv->base); + if (ret) + return ret; + + /* aim for 31.25MHz, which gives us nanosecond timestamps */ + d = 1000000 / 32; + + /* determine base clock for timer source */ +#if 0 /*XXX*/ + if (device->chipset < 0x40) { + n = nouveau_hw_get_clock(device, PLL_CORE); + } else +#endif + if (device->chipset <= 0x40) { + /*XXX: figure this out */ + f = -1; + n = 0; + } else { + f = device->crystal; + n = f; + while (n < (d * 2)) { + n += (n / m); + m++; + } + + nv_wr32(priv, 0x009220, m - 1); + } + + if (!n) { + nv_warn(priv, "unknown input clock freq\n"); + if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) || + !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) { + nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1); + nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1); + } + return 0; + } + + /* reduce ratio to acceptable values */ + while (((n % 5) == 0) && ((d % 5) == 0)) { + n /= 5; + d /= 5; + } + + while (((n % 2) == 0) && ((d % 2) == 0)) { + n /= 2; + d /= 2; + } + + while (n > 0xffff || d > 0xffff) { + n >>= 1; + d >>= 1; + } + + nv_debug(priv, "input frequency : %dHz\n", f); + nv_debug(priv, "input multiplier: %d\n", m); + nv_debug(priv, "numerator : 0x%08x\n", n); + nv_debug(priv, "denominator : 0x%08x\n", d); + nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n); + + nv_wr32(priv, NV04_PTIMER_NUMERATOR, n); + nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d); + nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff); + nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); + return 0; +} + +static int +nv04_timer_fini(struct nouveau_object *object, bool suspend) +{ + struct nv04_timer_priv *priv = (void *)object; + nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000); + return nouveau_timer_fini(&priv->base, suspend); +} + +struct nouveau_oclass +nv04_timer_oclass = { + .handle = NV_SUBDEV(TIMER, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_timer_ctor, + .dtor = nv04_timer_dtor, + .init = nv04_timer_init, + .fini = nv04_timer_fini, + } +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c new file mode 100644 index 0000000..e66fb77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c @@ -0,0 +1,480 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/mm.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> + +void +nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_vmmgr *vmm = vm->vmm; + struct nouveau_mm_node *r; + int big = vma->node->type != vmm->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 pde = (offset >> vmm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vmm->pgt_bits - bits); + u32 end, len; + + delta = 0; + list_for_each_entry(r, &node->regions, rl_entry) { + u64 phys = (u64)r->offset << 12; + u32 num = r->length >> bits; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vmm->map(vma, pgt, node, pte, len, phys, delta); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + phys += len << (bits + 12); + pde++; + pte = 0; + } + + delta += (u64)len << vma->node->type; + } + } + + vmm->flush(vm); +} + +void +nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node) +{ + nouveau_vm_map_at(vma, 0, node); +} + +void +nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, + struct nouveau_mem *mem) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_vmmgr *vmm = vm->vmm; + int big = vma->node->type != vmm->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vmm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vmm->pgt_bits - bits); + unsigned m, sglen; + u32 end, len; + int i; + struct scatterlist *sg; + + for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + sglen = sg_dma_len(sg) >> PAGE_SHIFT; + + end = pte + sglen; + if (unlikely(end >= max)) + end = max; + len = end - pte; + + for (m = 0; m < len; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + + vmm->map_sg(vma, pgt, mem, pte, 1, &addr); + num--; + pte++; + + if (num == 0) + goto finish; + } + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + if (m < sglen) { + for (; m < sglen; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + + vmm->map_sg(vma, 
pgt, mem, pte, 1, &addr); + num--; + pte++; + if (num == 0) + goto finish; + } + } + + } +finish: + vmm->flush(vm); +} + +void +nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, + struct nouveau_mem *mem) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_vmmgr *vmm = vm->vmm; + dma_addr_t *list = mem->pages; + int big = vma->node->type != vmm->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vmm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vmm->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vmm->map_sg(vma, pgt, mem, pte, len, list); + + num -= len; + pte += len; + list += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + vmm->flush(vm); +} + +void +nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_vmmgr *vmm = vm->vmm; + int big = vma->node->type != vmm->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vmm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vmm->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vmm->unmap(pgt, pte, len); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + vmm->flush(vm); +} + +void +nouveau_vm_unmap(struct nouveau_vma *vma) +{ + nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); +} + +static void +nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde) +{ + struct nouveau_vmmgr *vmm = vm->vmm; + struct nouveau_vm_pgd *vpgd; + struct nouveau_vm_pgt *vpgt; + struct nouveau_gpuobj *pgt; + u32 pde; + + for (pde = fpde; pde <= lpde; pde++) { + vpgt = &vm->pgt[pde - vm->fpde]; + if (--vpgt->refcount[big]) + continue; + + pgt = vpgt->obj[big]; + vpgt->obj[big] = NULL; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + vmm->map_pgt(vpgd->obj, pde, vpgt->obj); + } + + mutex_unlock(&vm->mm.mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&vm->mm.mutex); + } +} + +static int +nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) +{ + struct nouveau_vmmgr *vmm = vm->vmm; + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + struct nouveau_vm_pgd *vpgd; + struct nouveau_gpuobj *pgt; + int big = (type != vmm->spg_shift); + u32 pgt_size; + int ret; + + pgt_size = (1 << (vmm->pgt_bits + 12)) >> type; + pgt_size *= 8; + + mutex_unlock(&vm->mm.mutex); + ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &pgt); + mutex_lock(&vm->mm.mutex); + if (unlikely(ret)) + return ret; + + /* someone beat us to filling the PDE while we didn't have the lock */ + if (unlikely(vpgt->refcount[big]++)) { + mutex_unlock(&vm->mm.mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&vm->mm.mutex); + return 0; + } + + vpgt->obj[big] = pgt; + list_for_each_entry(vpgd, &vm->pgd_list, head) { + vmm->map_pgt(vpgd->obj, pde, vpgt->obj); + } + + return 0; +} + +int +nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, + u32 access, struct nouveau_vma 
*vma) +{ + struct nouveau_vmmgr *vmm = vm->vmm; + u32 align = (1 << page_shift) >> 12; + u32 msize = size >> 12; + u32 fpde, lpde, pde; + int ret; + + mutex_lock(&vm->mm.mutex); + ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align, + &vma->node); + if (unlikely(ret != 0)) { + mutex_unlock(&vm->mm.mutex); + return ret; + } + + fpde = (vma->node->offset >> vmm->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits; + + for (pde = fpde; pde <= lpde; pde++) { + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + int big = (vma->node->type != vmm->spg_shift); + + if (likely(vpgt->refcount[big])) { + vpgt->refcount[big]++; + continue; + } + + ret = nouveau_vm_map_pgt(vm, pde, vma->node->type); + if (ret) { + if (pde != fpde) + nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); + nouveau_mm_free(&vm->mm, &vma->node); + mutex_unlock(&vm->mm.mutex); + return ret; + } + } + mutex_unlock(&vm->mm.mutex); + + vma->vm = vm; + vma->offset = (u64)vma->node->offset << 12; + vma->access = access; + return 0; +} + +void +nouveau_vm_put(struct nouveau_vma *vma) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_vmmgr *vmm = vm->vmm; + u32 fpde, lpde; + + if (unlikely(vma->node == NULL)) + return; + fpde = (vma->node->offset >> vmm->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits; + + mutex_lock(&vm->mm.mutex); + nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde); + nouveau_mm_free(&vm->mm, &vma->node); + mutex_unlock(&vm->mm.mutex); +} + +int +nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, + u64 mm_offset, u32 block, struct nouveau_vm **pvm) +{ + struct nouveau_vm *vm; + u64 mm_length = (offset + length) - mm_offset; + int ret; + + vm = kzalloc(sizeof(*vm), GFP_KERNEL); + if (!vm) + return -ENOMEM; + + INIT_LIST_HEAD(&vm->pgd_list); + vm->vmm = vmm; + vm->refcount = 1; + vm->fpde = offset >> (vmm->pgt_bits + 12); + vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); + + vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); + if (!vm->pgt) { + kfree(vm); + return -ENOMEM; + } + + ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, + block >> 12); + if (ret) { + vfree(vm->pgt); + kfree(vm); + return ret; + } + + *pvm = vm; + + return 0; +} + +int +nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **pvm) +{ + struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); + return vmm->create(vmm, offset, length, mm_offset, pvm); +} + +static int +nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +{ + struct nouveau_vmmgr *vmm = vm->vmm; + struct nouveau_vm_pgd *vpgd; + int i; + + if (!pgd) + return 0; + + vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); + if (!vpgd) + return -ENOMEM; + + nouveau_gpuobj_ref(pgd, &vpgd->obj); + + mutex_lock(&vm->mm.mutex); + for (i = vm->fpde; i <= vm->lpde; i++) + vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); + list_add(&vpgd->head, &vm->pgd_list); + mutex_unlock(&vm->mm.mutex); + return 0; +} + +static void +nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) +{ + struct nouveau_vm_pgd *vpgd, *tmp; + struct nouveau_gpuobj *pgd = NULL; + + if (!mpgd) + return; + + mutex_lock(&vm->mm.mutex); + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + if (vpgd->obj == mpgd) { + pgd = vpgd->obj; + list_del(&vpgd->head); + kfree(vpgd); + break; + } + } + mutex_unlock(&vm->mm.mutex); + + nouveau_gpuobj_ref(NULL, &pgd); +} + +static void +nouveau_vm_del(struct 
nouveau_vm *vm) +{ + struct nouveau_vm_pgd *vpgd, *tmp; + + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + nouveau_vm_unlink(vm, vpgd->obj); + } + + nouveau_mm_fini(&vm->mm); + vfree(vm->pgt); + kfree(vm); +} + +int +nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, + struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm *vm; + int ret; + + vm = ref; + if (vm) { + ret = nouveau_vm_link(vm, pgd); + if (ret) + return ret; + + vm->refcount++; + } + + vm = *ptr; + *ptr = ref; + + if (vm) { + nouveau_vm_unlink(vm, pgd); + + if (--vm->refcount == 0) + nouveau_vm_del(vm); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c new file mode 100644 index 0000000..ed45437 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c @@ -0,0 +1,151 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
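The nv04 implementation below is a simple PCI GART; a brief orientation note, derived from the code and given for illustration only:

/*
 * Layout sketch: the page table is a single DMA object whose first two
 * words are initialised in nv04_vmmgr_ctor() (0x0002103d and the GART
 * limit); PTEs follow from byte offset 0x8, one 32-bit word per 4KiB
 * page, each holding the page's bus address with the low two bits set,
 * as written by nv04_vm_map_sg().
 */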
+ * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> + +#include "nv04.h" + +#define NV04_PDMA_SIZE (128 * 1024 * 1024) +#define NV04_PDMA_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + pte = 0x00008 + (pte * 4); + while (cnt) { + u32 page = PAGE_SIZE / NV04_PDMA_PAGE; + u32 phys = (u32)*list++; + while (cnt && page--) { + nv_wo32(pgt, pte, phys | 3); + phys += NV04_PDMA_PAGE; + pte += 4; + cnt -= 1; + } + } +} + +static void +nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte = 0x00008 + (pte * 4); + while (cnt--) { + nv_wo32(pgt, pte, 0x00000000); + pte += 4; + } +} + +static void +nv04_vm_flush(struct nouveau_vm *vm) +{ +} + +/******************************************************************************* + * VM object + ******************************************************************************/ + +int +nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart, + struct nouveau_vm **pvm) +{ + return -EINVAL; +} + +/******************************************************************************* + * VMMGR subdev + ******************************************************************************/ + +static int +nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv04_vmmgr_priv *priv; + struct nouveau_gpuobj *dma; + int ret; + + ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART", + "pcigart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV04_PDMA_SIZE; + priv->base.dma_bits = 32; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv04_vm_map_sg; + priv->base.unmap = nv04_vm_unmap; + priv->base.flush = nv04_vm_flush; + + ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + + 8, 16, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + dma = priv->vm->pgt[0].obj[0]; + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */ + nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); + return 0; +} + +void +nv04_vmmgr_dtor(struct nouveau_object *object) +{ + struct nv04_vmmgr_priv *priv = (void *)object; + if (priv->vm) { + nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]); + nouveau_vm_ref(NULL, &priv->vm, NULL); + } + if (priv->nullp) { + pci_free_consistent(nv_device(priv)->pdev, 16 * 1024, + priv->nullp, priv->null); + } + nouveau_vmmgr_destroy(&priv->base); +} + +struct nouveau_oclass +nv04_vmmgr_oclass = { + .handle = NV_SUBDEV(VM, 0x04), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_vmmgr_ctor, + .dtor = nv04_vmmgr_dtor, + .init = _nouveau_vmmgr_init, + .fini = _nouveau_vmmgr_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h new file mode 100644 index 0000000..ec42d4b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h @@ -0,0 +1,19 @@ +#ifndef __NV04_VMMGR_PRIV__ +#define 
__NV04_VMMGR_PRIV__ + +#include <subdev/vm.h> + +struct nv04_vmmgr_priv { + struct nouveau_vmmgr base; + struct nouveau_vm *vm; + dma_addr_t null; + void *nullp; +}; + +static inline struct nv04_vmmgr_priv * +nv04_vmmgr(void *obj) +{ + return (void *)nouveau_vmmgr(obj); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c new file mode 100644 index 0000000..064c762 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c @@ -0,0 +1,159 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/option.h> + +#include <subdev/timer.h> +#include <subdev/vm.h> + +#include "nv04.h" + +#define NV41_GART_SIZE (512 * 1024 * 1024) +#define NV41_GART_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + pte = pte * 4; + while (cnt) { + u32 page = PAGE_SIZE / NV41_GART_PAGE; + u64 phys = (u64)*list++; + while (cnt && page--) { + nv_wo32(pgt, pte, (phys >> 7) | 1); + phys += NV41_GART_PAGE; + pte += 4; + cnt -= 1; + } + } +} + +static void +nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte = pte * 4; + while (cnt--) { + nv_wo32(pgt, pte, 0x00000000); + pte += 4; + } +} + +static void +nv41_vm_flush(struct nouveau_vm *vm) +{ + struct nv04_vmmgr_priv *priv = (void *)vm->vmm; + + mutex_lock(&nv_subdev(priv)->mutex); + nv_wr32(priv, 0x100810, 0x00000022); + if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) { + nv_warn(priv, "flush timeout, 0x%08x\n", + nv_rd32(priv, 0x100810)); + } + nv_wr32(priv, 0x100810, 0x00000000); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +/******************************************************************************* + * VMMGR subdev + ******************************************************************************/ + +static int +nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nv04_vmmgr_priv *priv; + int ret; + + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) 
|| + !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { + return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, + data, size, pobject); + } + + ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART", + "pciegart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV41_GART_SIZE; + priv->base.dma_bits = 39; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv41_vm_map_sg; + priv->base.unmap = nv41_vm_unmap; + priv->base.flush = nv41_vm_flush; + + ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (NV41_GART_SIZE / NV41_GART_PAGE) * 4, + 16, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + return 0; +} + +static int +nv41_vmmgr_init(struct nouveau_object *object) +{ + struct nv04_vmmgr_priv *priv = (void *)object; + struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0]; + int ret; + + ret = nouveau_vmmgr_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x100800, dma->addr | 0x00000002); + nv_mask(priv, 0x10008c, 0x00000100, 0x00000100); + nv_wr32(priv, 0x100820, 0x00000000); + return 0; +} + +struct nouveau_oclass +nv41_vmmgr_oclass = { + .handle = NV_SUBDEV(VM, 0x41), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv41_vmmgr_ctor, + .dtor = nv04_vmmgr_dtor, + .init = nv41_vmmgr_init, + .fini = _nouveau_vmmgr_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c new file mode 100644 index 0000000..fae1f67 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c @@ -0,0 +1,249 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
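nv44 uses a denser GART format than nv41: page table entries below are 27 bits wide and four of them are packed into each aligned 16-byte group, the last word of a group is written back with bit 30 set, and partially populated groups have their empty slots pointed at a dummy page (priv->null) rather than left stale. A sketch of the packing for the first two slots, matching the shifts in nv44_vm_fill(); the function is illustrative only:

/* Illustrative only: addr0/addr1 are page frame numbers (bus address >> 12) */
static void
example_nv44_pack_first_two(u32 tmp[4], u32 addr0, u32 addr1)
{
	tmp[0] = (tmp[0] & ~0x07ffffff) | (addr0 & 0x07ffffff);  /* slot 0: bits 0-26       */
	tmp[0] = (tmp[0] & ~0xf8000000) | (addr1 << 27);         /* slot 1: low 5 bits      */
	tmp[1] = (tmp[1] & ~0x003fffff) | (addr1 >> 5);          /* slot 1: remaining bits  */
}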
+ * + * Authors: Ben Skeggs + */ + +#include <core/gpuobj.h> +#include <core/option.h> + +#include <subdev/timer.h> +#include <subdev/vm.h> + +#include "nv04.h" + +#define NV44_GART_SIZE (512 * 1024 * 1024) +#define NV44_GART_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null, + dma_addr_t *list, u32 pte, u32 cnt) +{ + u32 base = (pte << 2) & ~0x0000000f; + u32 tmp[4]; + + tmp[0] = nv_ro32(pgt, base + 0x0); + tmp[1] = nv_ro32(pgt, base + 0x4); + tmp[2] = nv_ro32(pgt, base + 0x8); + tmp[3] = nv_ro32(pgt, base + 0xc); + + while (cnt--) { + u32 addr = list ? (*list++ >> 12) : (null >> 12); + switch (pte++ & 0x3) { + case 0: + tmp[0] &= ~0x07ffffff; + tmp[0] |= addr; + break; + case 1: + tmp[0] &= ~0xf8000000; + tmp[0] |= addr << 27; + tmp[1] &= ~0x003fffff; + tmp[1] |= addr >> 5; + break; + case 2: + tmp[1] &= ~0xffc00000; + tmp[1] |= addr << 22; + tmp[2] &= ~0x0001ffff; + tmp[2] |= addr >> 10; + break; + case 3: + tmp[2] &= ~0xfffe0000; + tmp[2] |= addr << 17; + tmp[3] &= ~0x00000fff; + tmp[3] |= addr >> 15; + break; + } + } + + nv_wo32(pgt, base + 0x0, tmp[0]); + nv_wo32(pgt, base + 0x4, tmp[1]); + nv_wo32(pgt, base + 0x8, tmp[2]); + nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000); +} + +static void +nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm; + u32 tmp[4]; + int i; + + if (pte & 3) { + u32 max = 4 - (pte & 3); + u32 part = (cnt > max) ? max : cnt; + nv44_vm_fill(pgt, priv->null, list, pte, part); + pte += part; + list += part; + cnt -= part; + } + + while (cnt >= 4) { + for (i = 0; i < 4; i++) + tmp[i] = *list++ >> 12; + nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27); + nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22); + nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17); + nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000); + cnt -= 4; + } + + if (cnt) + nv44_vm_fill(pgt, priv->null, list, pte, cnt); +} + +static void +nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt); + + if (pte & 3) { + u32 max = 4 - (pte & 3); + u32 part = (cnt > max) ? 
max : cnt; + nv44_vm_fill(pgt, priv->null, NULL, pte, part); + pte += part; + cnt -= part; + } + + while (cnt >= 4) { + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + cnt -= 4; + } + + if (cnt) + nv44_vm_fill(pgt, priv->null, NULL, pte, cnt); +} + +static void +nv44_vm_flush(struct nouveau_vm *vm) +{ + struct nv04_vmmgr_priv *priv = (void *)vm->vmm; + nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE); + nv_wr32(priv, 0x100808, 0x00000020); + if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001)) + nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808)); + nv_wr32(priv, 0x100808, 0x00000000); +} + +/******************************************************************************* + * VMMGR subdev + ******************************************************************************/ + +static int +nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_device *device = nv_device(parent); + struct nv04_vmmgr_priv *priv; + int ret; + + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) { + return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass, + data, size, pobject); + } + + ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART", + "pciegart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV44_GART_SIZE; + priv->base.dma_bits = 39; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv44_vm_map_sg; + priv->base.unmap = nv44_vm_unmap; + priv->base.flush = nv44_vm_flush; + + priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null); + if (!priv->nullp) { + nv_error(priv, "unable to allocate dummy pages\n"); + return -ENOMEM; + } + + ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(nv_object(priv), NULL, + (NV44_GART_SIZE / NV44_GART_PAGE) * 4, + 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + return 0; +} + +static int +nv44_vmmgr_init(struct nouveau_object *object) +{ + struct nv04_vmmgr_priv *priv = (void *)object; + struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0]; + u32 addr; + int ret; + + ret = nouveau_vmmgr_init(&priv->base); + if (ret) + return ret; + + /* calculate vram address of this PRAMIN block, object must be + * allocated on 512KiB alignment, and not exceed a total size + * of 512KiB for this to work correctly + */ + addr = nv_rd32(priv, 0x10020c); + addr -= ((gart->addr >> 19) + 1) << 19; + + nv_wr32(priv, 0x100850, 0x80000000); + nv_wr32(priv, 0x100818, priv->null); + nv_wr32(priv, 0x100804, NV44_GART_SIZE); + nv_wr32(priv, 0x100850, 0x00008000); + nv_mask(priv, 0x10008c, 0x00000200, 0x00000200); + nv_wr32(priv, 0x100820, 0x00000000); + nv_wr32(priv, 0x10082c, 0x00000001); + nv_wr32(priv, 0x100800, addr | 0x00000010); + return 0; +} + +struct nouveau_oclass +nv44_vmmgr_oclass = { + .handle = NV_SUBDEV(VM, 0x44), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv44_vmmgr_ctor, + .dtor = nv04_vmmgr_dtor, + .init = nv44_vmmgr_init, + .fini = _nouveau_vmmgr_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c 
b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c new file mode 100644 index 0000000..e067f81 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c @@ -0,0 +1,227 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/gpuobj.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/vm.h> + +struct nv50_vmmgr_priv { + struct nouveau_vmmgr base; + spinlock_t lock; +}; + +static void +nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]) +{ + u64 phys = 0xdeadcafe00000000ULL; + u32 coverage = 0; + + if (pgt[0]) { + phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */ + coverage = (pgt[0]->size >> 3) << 12; + } else + if (pgt[1]) { + phys = 0x00000001 | pgt[1]->addr; /* present */ + coverage = (pgt[1]->size >> 3) << 16; + } + + if (phys & 1) { + if (coverage <= 32 * 1024 * 1024) + phys |= 0x60; + else if (coverage <= 64 * 1024 * 1024) + phys |= 0x40; + else if (coverage <= 128 * 1024 * 1024) + phys |= 0x20; + } + + nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); + nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); +} + +static inline u64 +vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys |= 1; /* present */ + phys |= (u64)memtype << 40; + phys |= target << 4; + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= (1 << 6); + if (!(vma->access & NV_MEM_ACCESS_WO)) + phys |= (1 << 3); + return phys; +} + +static void +nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) +{ + u32 comp = (mem->memtype & 0x180) >> 7; + u32 block, target; + int i; + + /* IGPs don't have real VRAM, re-target to stolen system memory */ + target = 0; + if (nouveau_fb(vma->vm->vmm)->ram.stolen) { + phys += nouveau_fb(vma->vm->vmm)->ram.stolen; + target = 3; + } + + phys = vm_addr(vma, phys, mem->memtype, target); + pte <<= 3; + cnt <<= 3; + + while (cnt) { + u32 offset_h = upper_32_bits(phys); + u32 offset_l = lower_32_bits(phys); + + for (i = 7; i >= 0; i--) { + block = 1 << (i + 3); + if (cnt >= block && !(pte & (block - 1))) + break; + } + offset_l |= (i << 7); + + phys += block << (vma->node->type - 3); + cnt -= block; + if (comp) { + u32 tag = mem->tag->offset + ((delta >> 16) * comp); + offset_h |= (tag << 17); + delta += block << (vma->node->type - 3); + } + + while (block) { + nv_wo32(pgt, pte + 0, offset_l); + 
nv_wo32(pgt, pte + 4, offset_h); + pte += 8; + block -= 8; + } + } +} + +static void +nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; + pte <<= 3; + while (cnt--) { + u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +static void +nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +static void +nv50_vm_flush(struct nouveau_vm *vm) +{ + struct nouveau_engine *engine; + int i; + + for (i = 0; i < NVDEV_SUBDEV_NR; i++) { + if (atomic_read(&vm->engref[i])) { + engine = nouveau_engine(vm->vmm, i); + if (engine && engine->tlb_flush) + engine->tlb_flush(engine); + } + } +} + +void +nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine) +{ + struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + nv_wr32(subdev, 0x100c80, (engine << 16) | 1); + if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000)) + nv_error(subdev, "vm flush timeout: engine %d\n", engine); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int +nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **pvm) +{ + u32 block = (1 << (vmm->pgt_bits + 12)); + if (block > length) + block = length; + + return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm); +} + +static int +nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nv50_vmmgr_priv *priv; + int ret; + + ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.limit = 1ULL << 40; + priv->base.dma_bits = 40; + priv->base.pgt_bits = 29 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 16; + priv->base.create = nv50_vm_create; + priv->base.map_pgt = nv50_vm_map_pgt; + priv->base.map = nv50_vm_map; + priv->base.map_sg = nv50_vm_map_sg; + priv->base.unmap = nv50_vm_unmap; + priv->base.flush = nv50_vm_flush; + spin_lock_init(&priv->lock); + return 0; +} + +struct nouveau_oclass +nv50_vmmgr_oclass = { + .handle = NV_SUBDEV(VM, 0x50), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_vmmgr_ctor, + .dtor = _nouveau_vmmgr_dtor, + .init = _nouveau_vmmgr_init, + .fini = _nouveau_vmmgr_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c new file mode 100644 index 0000000..4c3b0a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c @@ -0,0 +1,243 @@ +/* + * Copyright 2010 Red Hat Inc. 
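For Fermi (nvc0) the page tables below use 64-bit PTEs, written as two 32-bit words; compressed storage types are remapped to their uncompressed equivalents for system memory via nvc0_pte_storage_type_map. A sketch of the field packing performed by nvc0_vm_addr(), for illustration only:

/* Illustrative only: mirrors nvc0_vm_addr() below */
static inline u64
example_nvc0_pte(u64 phys, u32 memtype, u32 target, bool sys_access)
{
	u64 pte = (phys >> 8) | 1;          /* address plus present bit          */
	if (sys_access)
		pte |= 2;                   /* set for NV_MEM_ACCESS_SYS VMAs    */
	pte |= (u64)target  << 32;          /* aperture target                   */
	pte |= (u64)memtype << 36;          /* storage (kind) type               */
	return pte;
}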
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/device.h> +#include <core/gpuobj.h> + +#include <subdev/timer.h> +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/ltcg.h> + +struct nvc0_vmmgr_priv { + struct nouveau_vmmgr base; + spinlock_t lock; +}; + + +/* Map from compressed to corresponding uncompressed storage type. + * The value 0xff represents an invalid storage type. + */ +const u8 nvc0_pte_storage_type_map[256] = +{ + 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */ + 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */ + 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */ + 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */ + 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27, + 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */ + 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */ + 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */ + 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, + 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */ + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3, + 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */ + 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */ + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */ + 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff +}; + + +static void +nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, + struct nouveau_gpuobj *pgt[2]) +{ + u32 pde[2] = { 0, 0 }; + + if (pgt[0]) + pde[1] = 0x00000001 | (pgt[0]->addr >> 8); + if (pgt[1]) + pde[0] = 0x00000001 
| (pgt[1]->addr >> 8); + + nv_wo32(pgd, (index * 8) + 0, pde[0]); + nv_wo32(pgd, (index * 8) + 4, pde[1]); +} + +static inline u64 +nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys >>= 8; + + phys |= 0x00000001; /* present */ + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= 0x00000002; + + phys |= ((u64)target << 32); + phys |= ((u64)memtype << 36); + + return phys; +} + +static void +nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) +{ + u64 next = 1 << (vma->node->type - 8); + + phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); + pte <<= 3; + + if (mem->tag) { + struct nouveau_ltcg *ltcg = + nouveau_ltcg(vma->vm->vmm->base.base.parent); + u32 tag = mem->tag->offset + (delta >> 17); + phys |= (u64)tag << (32 + 12); + next |= (u64)1 << (32 + 12); + ltcg->tags_clear(ltcg, tag, cnt); + } + + while (cnt--) { + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + phys += next; + pte += 8; + } +} + +static void +nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; + /* compressed storage types are invalid for system memory */ + u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff]; + + pte <<= 3; + while (cnt--) { + u64 phys = nvc0_vm_addr(vma, *list++, memtype, target); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +static void +nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +void +nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type) +{ + struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev); + unsigned long flags; + + /* looks like maybe a "free flush slots" counter, the + * faster you write to 0x100cbc to more it decreases + */ + spin_lock_irqsave(&priv->lock, flags); + if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) { + nv_error(subdev, "vm timeout 0: 0x%08x %d\n", + nv_rd32(subdev, 0x100c80), type); + } + + nv_wr32(subdev, 0x100cb8, addr >> 8); + nv_wr32(subdev, 0x100cbc, 0x80000000 | type); + + /* wait for flush to be queued? 
*/ + if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) { + nv_error(subdev, "vm timeout 1: 0x%08x %d\n", + nv_rd32(subdev, 0x100c80), type); + } + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void +nvc0_vm_flush(struct nouveau_vm *vm) +{ + struct nouveau_vm_pgd *vpgd; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1); + } +} + +static int +nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, + u64 mm_offset, struct nouveau_vm **pvm) +{ + return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm); +} + +static int +nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nvc0_vmmgr_priv *priv; + int ret; + + ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.limit = 1ULL << 40; + priv->base.dma_bits = 40; + priv->base.pgt_bits = 27 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 17; + priv->base.create = nvc0_vm_create; + priv->base.map_pgt = nvc0_vm_map_pgt; + priv->base.map = nvc0_vm_map; + priv->base.map_sg = nvc0_vm_map_sg; + priv->base.unmap = nvc0_vm_unmap; + priv->base.flush = nvc0_vm_flush; + spin_lock_init(&priv->lock); + return 0; +} + +struct nouveau_oclass +nvc0_vmmgr_oclass = { + .handle = NV_SUBDEV(VM, 0xc0), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_vmmgr_ctor, + .dtor = _nouveau_vmmgr_dtor, + .init = _nouveau_vmmgr_init, + .fini = _nouveau_vmmgr_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile new file mode 100644 index 0000000..ea3f5b8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile @@ -0,0 +1,10 @@ +nouveau-y += dispnv04/arb.o +nouveau-y += dispnv04/crtc.o +nouveau-y += dispnv04/cursor.o +nouveau-y += dispnv04/dac.o +nouveau-y += dispnv04/dfp.o +nouveau-y += dispnv04/disp.o +nouveau-y += dispnv04/hw.o +nouveau-y += dispnv04/tvmodesnv17.o +nouveau-y += dispnv04/tvnv04.o +nouveau-y += dispnv04/tvnv17.o diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c new file mode 100644 index 0000000..2e70462 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -0,0 +1,265 @@ +/* + * Copyright 1993-2003 NVIDIA, Corporation + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
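The arbitration code below computes two CRTC FIFO parameters from the memory and pixel clocks: a low-water mark (the fill level at which the FIFO starts refilling) and a burst size (how much is fetched per refill). A condensed summary of the nv10+ calculation, using the names from nv10_calc_arb(); this is derived from the code, not a normative formula:

/*
 *   fill_rate  = mclk_khz * memory_width / 8     kB/s into the FIFO
 *   drain_rate = pclk_khz * bpp / 8              kB/s out to the CRTC
 *   burst      = largest power of two that neither overflows the FIFO
 *                nor exceeds the ~80ns burst latency budget
 *   lwm        = (refill latency * drain_rate) plus 10% of the remaining
 *                headroom up to the maximum safe fill level
 * nv04_update_arb() then encodes them as ilog2(burst >> 4) and lwm >> 3.
 */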
+ */ + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "hw.h" + +/****************************************************************************\ +* * +* The video arbitration routines calculate some "magic" numbers. Fixes * +* the snow seen when accessing the framebuffer without it. * +* It just works (I hope). * +* * +\****************************************************************************/ + +struct nv_fifo_info { + int lwm; + int burst; +}; + +struct nv_sim_state { + int pclk_khz; + int mclk_khz; + int nvclk_khz; + int bpp; + int mem_page_miss; + int mem_latency; + int memory_type; + int memory_width; + int two_heads; +}; + +static void +nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) +{ + int pagemiss, cas, width, bpp; + int nvclks, mclks, pclks, crtpagemiss; + int found, mclk_extra, mclk_loop, cbs, m1, p1; + int mclk_freq, pclk_freq, nvclk_freq; + int us_m, us_n, us_p, crtc_drain_rate; + int cpm_us, us_crt, clwm; + + pclk_freq = arb->pclk_khz; + mclk_freq = arb->mclk_khz; + nvclk_freq = arb->nvclk_khz; + pagemiss = arb->mem_page_miss; + cas = arb->mem_latency; + width = arb->memory_width >> 6; + bpp = arb->bpp; + cbs = 128; + + pclks = 2; + nvclks = 10; + mclks = 13 + cas; + mclk_extra = 3; + found = 0; + + while (!found) { + found = 1; + + mclk_loop = mclks + mclk_extra; + us_m = mclk_loop * 1000 * 1000 / mclk_freq; + us_n = nvclks * 1000 * 1000 / nvclk_freq; + us_p = nvclks * 1000 * 1000 / pclk_freq; + + crtc_drain_rate = pclk_freq * bpp / 8; + crtpagemiss = 2; + crtpagemiss += 1; + cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq; + us_crt = cpm_us + us_m + us_n + us_p; + clwm = us_crt * crtc_drain_rate / (1000 * 1000); + clwm++; + + m1 = clwm + cbs - 512; + p1 = m1 * pclk_freq / mclk_freq; + p1 = p1 * bpp / 8; + if ((p1 < m1 && m1 > 0) || clwm > 519) { + found = !mclk_extra; + mclk_extra--; + } + if (clwm < 384) + clwm = 384; + + fifo->lwm = clwm; + fifo->burst = cbs; + } +} + +static void +nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb) +{ + int fill_rate, drain_rate; + int pclks, nvclks, mclks, xclks; + int pclk_freq, nvclk_freq, mclk_freq; + int fill_lat, extra_lat; + int max_burst_o, max_burst_l; + int fifo_len, min_lwm, max_lwm; + const int burst_lat = 80; /* Maximum allowable latency due + * to the CRTC FIFO burst. (ns) */ + + pclk_freq = arb->pclk_khz; + nvclk_freq = arb->nvclk_khz; + mclk_freq = arb->mclk_khz; + + fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */ + drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */ + + fifo_len = arb->two_heads ? 1536 : 1024; /* B */ + + /* Fixed FIFO refill latency. */ + + pclks = 4; /* lwm detect. */ + + nvclks = 3 /* lwm -> sync. */ + + 2 /* fbi bus cycles (1 req + 1 busy) */ + + 1 /* 2 edge sync. may be very close to edge so + * just put one. */ + + 1 /* fbi_d_rdv_n */ + + 1 /* Fbi_d_rdata */ + + 1; /* crtfifo load */ + + mclks = 1 /* 2 edge sync. may be very close to edge so + * just put one. */ + + 1 /* arb_hp_req */ + + 5 /* tiling pipeline */ + + 2 /* latency fifo */ + + 2 /* memory request to fbio block */ + + 7; /* data returned from fbio block */ + + /* Need to accumulate 256 bits for read */ + mclks += (arb->memory_type == 0 ? 2 : 1) + * arb->memory_width / 32; + + fill_lat = mclks * 1000 * 1000 / mclk_freq /* minimum mclk latency */ + + nvclks * 1000 * 1000 / nvclk_freq /* nvclk latency */ + + pclks * 1000 * 1000 / pclk_freq; /* pclk latency */ + + /* Conditional FIFO refill latency. 
*/ + + xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to + * the overlay. */ + + 2 * arb->mem_page_miss /* Extra pagemiss latency. */ + + (arb->bpp == 32 ? 8 : 4); /* Margin of error. */ + + extra_lat = xclks * 1000 * 1000 / mclk_freq; + + if (arb->two_heads) + /* Account for another CRTC. */ + extra_lat += fill_lat + extra_lat + burst_lat; + + /* FIFO burst */ + + /* Max burst not leading to overflows. */ + max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000)) + * (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000); + fifo->burst = min(max_burst_o, 1024); + + /* Max burst value with an acceptable latency. */ + max_burst_l = burst_lat * fill_rate / (1000 * 1000); + fifo->burst = min(max_burst_l, fifo->burst); + + fifo->burst = rounddown_pow_of_two(fifo->burst); + + /* FIFO low watermark */ + + min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1; + max_lwm = fifo_len - fifo->burst + + fill_lat * drain_rate / (1000 * 1000) + + fifo->burst * drain_rate / fill_rate; + + fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */ +} + +static void +nv04_update_arb(struct drm_device *dev, int VClk, int bpp, + int *burst, int *lwm) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nv_fifo_info fifo_data; + struct nv_sim_state sim_data; + int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); + int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); + uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1); + + sim_data.pclk_khz = VClk; + sim_data.mclk_khz = MClk; + sim_data.nvclk_khz = NVClk; + sim_data.bpp = bpp; + sim_data.two_heads = nv_two_heads(dev); + if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || + (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { + uint32_t type; + + pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); + + sim_data.memory_type = (type >> 12) & 1; + sim_data.memory_width = 64; + sim_data.mem_latency = 3; + sim_data.mem_page_miss = 10; + } else { + sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1; + sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 
128 : 64; + sim_data.mem_latency = cfg1 & 0xf; + sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); + } + + if (nv_device(drm->device)->card_type == NV_04) + nv04_calc_arb(&fifo_data, &sim_data); + else + nv10_calc_arb(&fifo_data, &sim_data); + + *burst = ilog2(fifo_data.burst >> 4); + *lwm = fifo_data.lwm >> 3; +} + +static void +nv20_update_arb(int *burst, int *lwm) +{ + unsigned int fifo_size, burst_size, graphics_lwm; + + fifo_size = 2048; + burst_size = 512; + graphics_lwm = fifo_size - burst_size; + + *burst = ilog2(burst_size >> 5); + *lwm = graphics_lwm >> 3; +} + +void +nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (nv_device(drm->device)->card_type < NV_20) + nv04_update_arb(dev, vclk, bpp, burst, lwm); + else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || + (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { + *burst = 128; + *lwm = 0x0480; + } else + nv20_update_arb(burst, lwm); +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c new file mode 100644 index 0000000..0782bd2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -0,0 +1,1072 @@ +/* + * Copyright 1993-2003 NVIDIA, Corporation + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
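One detail of nv_crtc_mode_set_vga() further below worth calling out: when a mode does not carry explicit polarities for both syncs, the driver falls back to the classic VGA convention of encoding the vertical resolution class in MiscOutReg. A standalone sketch of that fallback, for illustration only (the function name is not part of the driver):

/* Mirrors the MiscOutReg fallback in nv_crtc_mode_set_vga(); vdisplay is
 * the vertical resolution after doublescan/vscan adjustment */
static u8
example_misc_out_polarity(int vdisplay)
{
	if (vdisplay < 400)
		return 0xA3;    /* +hsync -vsync */
	if (vdisplay < 480)
		return 0x63;    /* -hsync +vsync */
	if (vdisplay < 768)
		return 0xE3;    /* -hsync -vsync */
	return 0x23;            /* +hsync +vsync */
}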
+ */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_bo.h" +#include "nouveau_gem.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "hw.h" +#include "nvreg.h" +#include "nouveau_fbcon.h" +#include "disp.h" + +#include <subdev/bios/pll.h> +#include <subdev/clock.h> + +static int +nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + +static void +crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index) +{ + NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index, + crtcstate->CRTC[index]); +} + +static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + + regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level; + if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) { + regp->CRTC[NV_CIO_CRE_CSB] = 0x80; + regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B); + } + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB); +} + +static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + + nv_crtc->sharpness = level; + if (level < 0) /* blur is in hw range 0x3f -> 0x20 */ + level += 0x40; + regp->ramdac_634 = level; + NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634); +} + +#define PLLSEL_VPLL1_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \ + | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2) +#define PLLSEL_VPLL2_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2) +#define PLLSEL_TV_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2) + +/* NV4x 0x40.. pll notes: + * gpu pll: 0x4000 + 0x4004 + * ?gpu? pll: 0x4008 + 0x400c + * vpll1: 0x4010 + 0x4014 + * vpll2: 0x4018 + 0x401c + * mpll: 0x4020 + 0x4024 + * mpll: 0x4038 + 0x403c + * + * the first register of each pair has some unknown details: + * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?) + * bits 20-23: (mpll) something to do with post divider? + * bits 28-31: related to single stage mode? (bit 8/12) + */ + +static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bios *bios = nouveau_bios(drm->device); + struct nouveau_clock *clk = nouveau_clock(drm->device); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; + struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; + struct nouveau_pll_vals *pv = ®p->pllvals; + struct nvbios_pll pll_lim; + + if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, + &pll_lim)) + return; + + /* NM2 == 0 is used to determine single stage mode on two stage plls */ + pv->NM2 = 0; + + /* for newer nv4x the blob uses only the first stage of the vpll below a + * certain clock. for a certain nv4b this is 150MHz. 
since the max + * output frequency of the first stage for this card is 300MHz, it is + * assumed the threshold is given by vco1 maxfreq/2 + */ + /* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6, + * not 8, others unknown), the blob always uses both plls. no problem + * has yet been observed in allowing the use a single stage pll on all + * nv43 however. the behaviour of single stage use is untested on nv40 + */ + if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) + memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); + + + if (!clk->pll_calc(clk, &pll_lim, dot_clock, pv)) + return; + + state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; + + /* The blob uses this always, so let's do the same */ + if (nv_device(drm->device)->card_type == NV_40) + state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; + /* again nv40 and some nv43 act more like nv3x as described above */ + if (nv_device(drm->device)->chipset < 0x41) + state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | + NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; + state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; + + if (pv->NM2) + NV_DEBUG(drm, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", + pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); + else + NV_DEBUG(drm, "vpll: n %d m %d log2p %d\n", + pv->N1, pv->M1, pv->log2P); + + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); +} + +static void +nv_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + unsigned char seq1 = 0, crtc17 = 0; + unsigned char crtc1A; + + NV_DEBUG(drm, "Setting dpms mode %d on CRTC %d\n", mode, + nv_crtc->index); + + if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. 
*/ + return; + + nv_crtc->last_dpms = mode; + + if (nv_two_heads(dev)) + NVSetOwner(dev, nv_crtc->index); + + /* nv4ref indicates these two RPC1 bits inhibit h/v sync */ + crtc1A = NVReadVgaCrtc(dev, nv_crtc->index, + NV_CIO_CRE_RPC1_INDEX) & ~0xC0; + switch (mode) { + case DRM_MODE_DPMS_STANDBY: + /* Screen: Off; HSync: Off, VSync: On -- Not Supported */ + seq1 = 0x20; + crtc17 = 0x80; + crtc1A |= 0x80; + break; + case DRM_MODE_DPMS_SUSPEND: + /* Screen: Off; HSync: On, VSync: Off -- Not Supported */ + seq1 = 0x20; + crtc17 = 0x80; + crtc1A |= 0x40; + break; + case DRM_MODE_DPMS_OFF: + /* Screen: Off; HSync: Off, VSync: Off */ + seq1 = 0x20; + crtc17 = 0x00; + crtc1A |= 0xC0; + break; + case DRM_MODE_DPMS_ON: + default: + /* Screen: On; HSync: On, VSync: On */ + seq1 = 0x00; + crtc17 = 0x80; + break; + } + + NVVgaSeqReset(dev, nv_crtc->index, true); + /* Each head has it's own sequencer, so we can turn it off when we want */ + seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20); + NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1); + crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80); + mdelay(10); + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17); + NVVgaSeqReset(dev, nv_crtc->index, false); + + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); +} + +static bool +nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void +nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + struct drm_framebuffer *fb = crtc->fb; + + /* Calculate our timings */ + int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; + int horizStart = (mode->crtc_hsync_start >> 3) + 1; + int horizEnd = (mode->crtc_hsync_end >> 3) + 1; + int horizTotal = (mode->crtc_htotal >> 3) - 5; + int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; + int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; + int vertDisplay = mode->crtc_vdisplay - 1; + int vertStart = mode->crtc_vsync_start - 1; + int vertEnd = mode->crtc_vsync_end - 1; + int vertTotal = mode->crtc_vtotal - 2; + int vertBlankStart = mode->crtc_vdisplay - 1; + int vertBlankEnd = mode->crtc_vtotal - 1; + + struct drm_encoder *encoder; + bool fp_output = false; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (encoder->crtc == crtc && + (nv_encoder->dcb->type == DCB_OUTPUT_LVDS || + nv_encoder->dcb->type == DCB_OUTPUT_TMDS)) + fp_output = true; + } + + if (fp_output) { + vertStart = vertTotal - 3; + vertEnd = vertTotal - 2; + vertBlankStart = vertStart; + horizStart = horizTotal - 5; + horizEnd = horizTotal - 2; + horizBlankEnd = horizTotal + 4; +#if 0 + if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10) + /* This reportedly works around some video overlay bandwidth problems */ + horizTotal += 2; +#endif + } + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vertTotal |= 1; + +#if 0 + ErrorF("horizDisplay: 0x%X \n", horizDisplay); + ErrorF("horizStart: 0x%X \n", horizStart); + ErrorF("horizEnd: 0x%X \n", horizEnd); + ErrorF("horizTotal: 0x%X \n", horizTotal); + ErrorF("horizBlankStart: 0x%X \n", horizBlankStart); + ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd); + 
ErrorF("vertDisplay: 0x%X \n", vertDisplay); + ErrorF("vertStart: 0x%X \n", vertStart); + ErrorF("vertEnd: 0x%X \n", vertEnd); + ErrorF("vertTotal: 0x%X \n", vertTotal); + ErrorF("vertBlankStart: 0x%X \n", vertBlankStart); + ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd); +#endif + + /* + * compute correct Hsync & Vsync polarity + */ + if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)) + && (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) { + + regp->MiscOutReg = 0x23; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + regp->MiscOutReg |= 0x40; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + regp->MiscOutReg |= 0x80; + } else { + int vdisplay = mode->vdisplay; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + vdisplay *= 2; + if (mode->vscan > 1) + vdisplay *= mode->vscan; + if (vdisplay < 400) + regp->MiscOutReg = 0xA3; /* +hsync -vsync */ + else if (vdisplay < 480) + regp->MiscOutReg = 0x63; /* -hsync +vsync */ + else if (vdisplay < 768) + regp->MiscOutReg = 0xE3; /* -hsync -vsync */ + else + regp->MiscOutReg = 0x23; /* +hsync +vsync */ + } + + regp->MiscOutReg |= (mode->clock_index & 0x03) << 2; + + /* + * Time Sequencer + */ + regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00; + /* 0x20 disables the sequencer */ + if (mode->flags & DRM_MODE_FLAG_CLKDIV2) + regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29; + else + regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21; + regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F; + regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00; + regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E; + + /* + * CRTC + */ + regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal; + regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay; + regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart; + regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) | + XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0); + regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart; + regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) | + XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0); + regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal; + regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) | + XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) | + XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) | + (1 << 4) | + XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) | + XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) | + XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) | + XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8); + regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) | + 1 << 6 | + XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9); + regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart; + regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0); + regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay; + /* framebuffer can be larger than crtc scanout area. */ + regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8; + regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00; + regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart; + regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd; + regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43; + regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff; + + /* + * Some extended CRTC registers (they are not saved with the rest of the vga regs). 
+ */ + + /* framebuffer can be larger than crtc scanout area. */ + regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = + XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); + regp->CRTC[NV_CIO_CRE_42] = + XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11); + regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ? + MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00; + regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) | + XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) | + XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) | + XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) | + XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10); + regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) | + XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) | + XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) | + XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8); + regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) | + XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) | + XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) | + XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + horizTotal = (horizTotal >> 1) & ~1; + regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal; + regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8); + } else + regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff; /* interlace off */ + + /* + * Graphics Display Controller + */ + regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00; + regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */ + regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */ + regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F; + regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF; + + regp->Attribute[0] = 0x00; /* standard colormap translation */ + regp->Attribute[1] = 0x01; + regp->Attribute[2] = 0x02; + regp->Attribute[3] = 0x03; + regp->Attribute[4] = 0x04; + regp->Attribute[5] = 0x05; + regp->Attribute[6] = 0x06; + regp->Attribute[7] = 0x07; + regp->Attribute[8] = 0x08; + regp->Attribute[9] = 0x09; + regp->Attribute[10] = 0x0A; + regp->Attribute[11] = 0x0B; + regp->Attribute[12] = 0x0C; + regp->Attribute[13] = 0x0D; + regp->Attribute[14] = 0x0E; + regp->Attribute[15] = 0x0F; + regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */ + /* Non-vga */ + regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00; + regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */ + regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00; + regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00; +} + +/** + * Sets up registers for the given mode/adjusted_mode pair. + * + * The clocks, CRTCs and outputs attached to this CRTC must be off. + * + * This shouldn't enable any clocks, CRTCs, or outputs, but they should + * be easily turned on/off after this. 
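+ *
+ * Most of what follows only fills in the software state (mode_reg); it is
+ * written out to the hardware later, when nv_crtc_commit() loads the state.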
+ */ +static void +nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index]; + struct drm_encoder *encoder; + bool lvds_output = false, tmds_output = false, tv_output = false, + off_chip_digital = false; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + bool digital = false; + + if (encoder->crtc != crtc) + continue; + + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) + digital = lvds_output = true; + if (nv_encoder->dcb->type == DCB_OUTPUT_TV) + tv_output = true; + if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) + digital = tmds_output = true; + if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital) + off_chip_digital = true; + } + + /* Registers not directly related to the (s)vga mode */ + + /* What is the meaning of this register? */ + /* A few popular values are 0x18, 0x1c, 0x38, 0x3c */ + regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5); + + regp->crtc_eng_ctrl = 0; + /* Except for rare conditions I2C is enabled on the primary crtc */ + if (nv_crtc->index == 0) + regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C; +#if 0 + /* Set overlay to desired crtc. */ + if (dev->overlayAdaptor) { + NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev); + if (pPriv->overlayCRTC == nv_crtc->index) + regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY; + } +#endif + + /* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */ + regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | + NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | + NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; + if (nv_device(drm->device)->chipset >= 0x11) + regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; + + /* Unblock some timings */ + regp->CRTC[NV_CIO_CRE_53] = 0; + regp->CRTC[NV_CIO_CRE_54] = 0; + + /* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */ + if (lvds_output) + regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11; + else if (tmds_output) + regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88; + else + regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22; + + /* These values seem to vary */ + /* This register seems to be used by the bios to make certain decisions on some G70 cards? 
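+ * Either way, just carry over the value that was saved at load time.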
*/ + regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX]; + + nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation); + + /* probably a scratch reg, but kept for cargo-cult purposes: + * bit0: crtc0?, head A + * bit6: lvds, head A + * bit7: (only in X), head A + */ + if (nv_crtc->index == 0) + regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80; + + /* The blob seems to take the current value from crtc 0, add 4 to that + * and reuse the old value for crtc 1 */ + regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY]; + if (!nv_crtc->index) + regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4; + + /* the blob sometimes sets |= 0x10 (which is the same as setting |= + * 1 << 30 on 0x60.830), for no apparent reason */ + regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; + + if (nv_device(drm->device)->card_type >= NV_30) + regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; + + regp->crtc_830 = mode->crtc_vdisplay - 3; + regp->crtc_834 = mode->crtc_vdisplay - 1; + + if (nv_device(drm->device)->card_type == NV_40) + /* This is what the blob does */ + regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); + + if (nv_device(drm->device)->card_type >= NV_30) + regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); + + if (nv_device(drm->device)->card_type >= NV_10) + regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; + else + regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; + + /* Some misc regs */ + if (nv_device(drm->device)->card_type == NV_40) { + regp->CRTC[NV_CIO_CRE_85] = 0xFF; + regp->CRTC[NV_CIO_CRE_86] = 0x1; + } + + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8; + /* Enable slaved mode (called MODE_TV in nv4ref.h) */ + if (lvds_output || tmds_output || tv_output) + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7); + + /* Generic PRAMDAC regs */ + + if (nv_device(drm->device)->card_type >= NV_10) + /* Only bit that bios and blob set. */ + regp->nv10_cursync = (1 << 25); + + regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | + NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL | + NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; + if (crtc->fb->depth == 16) + regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; + if (nv_device(drm->device)->chipset >= 0x11) + regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; + + regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ + regp->tv_setup = 0; + + nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness); + + /* Some values the blob sets */ + regp->ramdac_8c0 = 0x100; + regp->ramdac_a20 = 0x0; + regp->ramdac_a24 = 0xfffff; + regp->ramdac_a34 = 0x1; +} + +/** + * Sets up registers for the given mode/adjusted_mode pair. + * + * The clocks, CRTCs and outputs attached to this CRTC must be off. + * + * This shouldn't enable any clocks, CRTCs, or outputs, but they should + * be easily turned on/off after this. 
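+ *
+ * On nv04 hardware this amounts to programming the VGA timings, the
+ * extended CRTC/RAMDAC state and the VPLL, using the helpers above.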
+ */ +static int +nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_drm *drm = nouveau_drm(dev); + + NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); + drm_mode_debug_printmodeline(adjusted_mode); + + /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ + nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); + + nv_crtc_mode_set_vga(crtc, adjusted_mode); + /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ + if (nv_device(drm->device)->card_type == NV_40) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); + nv_crtc_mode_set_regs(crtc, adjusted_mode); + nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); + return 0; +} + +static void nv_crtc_save(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; + struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index]; + struct nv04_mode_state *saved = &nv04_display(dev)->saved_reg; + struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index]; + + if (nv_two_heads(crtc->dev)) + NVSetOwner(crtc->dev, nv_crtc->index); + + nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved); + + /* init some state to saved value */ + state->sel_clk = saved->sel_clk & ~(0x5 << 16); + crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX]; + state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK); + crtc_state->gpio_ext = crtc_saved->gpio_ext; +} + +static void nv_crtc_restore(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + int head = nv_crtc->index; + uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21]; + + if (nv_two_heads(crtc->dev)) + NVSetOwner(crtc->dev, head); + + nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg); + nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21); + + nv_crtc->last_dpms = NV_DPMS_CLEARED; +} + +static void nv_crtc_prepare(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_crtc_helper_funcs *funcs = crtc->helper_private; + + if (nv_two_heads(dev)) + NVSetOwner(dev, nv_crtc->index); + + drm_vblank_pre_modeset(dev, nv_crtc->index); + funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + NVBlankScreen(dev, nv_crtc->index, true); + + /* Some more preparation. 
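+ * Switch the CRTC to non-VGA start address mode and, on NV40, clear a bit
+ * in PRAMDAC_900 before the new state gets loaded in nv_crtc_commit().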
*/ + NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); + if (nv_device(drm->device)->card_type == NV_40) { + uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); + NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); + } +} + +static void nv_crtc_commit(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_crtc_helper_funcs *funcs = crtc->helper_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg); + nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL); + +#ifdef __BIG_ENDIAN + /* turn on LFB swapping */ + { + uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR); + tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG); + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp); + } +#endif + + funcs->dpms(crtc, DRM_MODE_DPMS_ON); + drm_vblank_post_modeset(dev, nv_crtc->index); +} + +static void nv_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + if (!nv_crtc) + return; + + drm_crtc_cleanup(crtc); + + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_unpin(nv_crtc->cursor.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + kfree(nv_crtc); +} + +static void +nv_crtc_gamma_load(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = nv_crtc->base.dev; + struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs; + int i; + + rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC; + for (i = 0; i < 256; i++) { + rgbs[i].r = nv_crtc->lut.r[i] >> 8; + rgbs[i].g = nv_crtc->lut.g[i] >> 8; + rgbs[i].b = nv_crtc->lut.b[i] >> 8; + } + + nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg); +} + +static void +nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, + uint32_t size) +{ + int end = (start + size > 256) ? 256 : start + size, i; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + for (i = start; i < end; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + /* We need to know the depth before we upload, but it's possible to + * get called before a framebuffer is bound. If this is the case, + * mark the lut values as dirty by setting depth==0, and it'll be + * uploaded on the first mode_set_base() + */ + if (!nv_crtc->base.fb) { + nv_crtc->lut.depth = 0; + return; + } + + nv_crtc_gamma_load(crtc); +} + +static int +nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool atomic) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + struct drm_framebuffer *drm_fb; + struct nouveau_framebuffer *fb; + int arb_burst, arb_lwm; + int ret; + + NV_DEBUG(drm, "index %d\n", nv_crtc->index); + + /* no fb bound */ + if (!atomic && !crtc->fb) { + NV_DEBUG(drm, "No FB bound\n"); + return 0; + } + + + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) 
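+ * The atomic path is presumably for contexts that must not sleep, such as
+ * debugger/panic-time console output.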
+ */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } else { + drm_fb = crtc->fb; + fb = nouveau_framebuffer(crtc->fb); + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed. + */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } + } + + nv_crtc->fb.offset = fb->nvbo->bo.offset; + + if (nv_crtc->lut.depth != drm_fb->depth) { + nv_crtc->lut.depth = drm_fb->depth; + nv_crtc_gamma_load(crtc); + } + + /* Update the framebuffer format. */ + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3; + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8; + regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; + if (crtc->fb->depth == 16) + regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX); + NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL, + regp->ramdac_gen_ctrl); + + regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3; + regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = + XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); + regp->CRTC[NV_CIO_CRE_42] = + XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42); + + /* Update the framebuffer location. */ + regp->fb_start = nv_crtc->fb.offset & ~3; + regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8); + nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); + + /* Update the arbitration parameters. */ + nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, + &arb_burst, &arb_lwm); + + regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst; + regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); + + if (nv_device(drm->device)->card_type >= NV_20) { + regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); + } + + return 0; +} + +static int +nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); +} + +static int +nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct drm_device *dev = drm->dev; + + if (state == ENTER_ATOMIC_MODE_SET) + nouveau_fbcon_save_disable_accel(dev); + else + nouveau_fbcon_restore_accel(dev); + + return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); +} + +static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, + struct nouveau_bo *dst) +{ + int width = nv_cursor_width(dev); + uint32_t pixel; + int i, j; + + for (i = 0; i < width; i++) { + for (j = 0; j < width; j++) { + pixel = nouveau_bo_rd32(src, i*64 + j); + + nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16 + | (pixel & 0xf80000) >> 9 + | (pixel & 0xf800) >> 6 + | (pixel & 0xf8) >> 3); + } + } +} + +static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, + struct nouveau_bo *dst) +{ + uint32_t pixel; + int alpha, i; + + /* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha + * cursors (though NPM in combination with 
fp dithering may not work on + * nv11, from "nv" driver history) + * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the + * blob uses, however we get given PM cursors so we use PM mode + */ + for (i = 0; i < 64 * 64; i++) { + pixel = nouveau_bo_rd32(src, i); + + /* hw gets unhappy if alpha <= rgb values. for a PM image "less + * than" shouldn't happen; fix "equal to" case by adding one to + * alpha channel (slightly inaccurate, but so is attempting to + * get back to NPM images, due to limits of integer precision) + */ + alpha = pixel >> 24; + if (alpha > 0 && alpha < 255) + pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24); + +#ifdef __BIG_ENDIAN + { + struct nouveau_drm *drm = nouveau_drm(dev); + + if (nv_device(drm->device)->chipset == 0x11) { + pixel = ((pixel & 0x000000ff) << 24) | + ((pixel & 0x0000ff00) << 8) | + ((pixel & 0x00ff0000) >> 8) | + ((pixel & 0xff000000) >> 24); + } + } +#endif + + nouveau_bo_wr32(dst, i, pixel); + } +} + +static int +nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t buffer_handle, uint32_t width, uint32_t height) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct drm_device *dev = drm->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_bo *cursor = NULL; + struct drm_gem_object *gem; + int ret = 0; + + if (!buffer_handle) { + nv_crtc->cursor.hide(nv_crtc, true); + return 0; + } + + if (width != 64 || height != 64) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, buffer_handle); + if (!gem) + return -ENOENT; + cursor = nouveau_gem_object(gem); + + ret = nouveau_bo_map(cursor); + if (ret) + goto out; + + if (nv_device(drm->device)->chipset >= 0x11) + nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); + else + nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); + + nouveau_bo_unmap(cursor); + nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset; + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); + nv_crtc->cursor.show(nv_crtc, true); +out: + drm_gem_object_unreference_unlocked(gem); + return ret; +} + +static int +nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->cursor.set_pos(nv_crtc, x, y); + return 0; +} + +static const struct drm_crtc_funcs nv04_crtc_funcs = { + .save = nv_crtc_save, + .restore = nv_crtc_restore, + .cursor_set = nv04_crtc_cursor_set, + .cursor_move = nv04_crtc_cursor_move, + .gamma_set = nv_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, + .destroy = nv_crtc_destroy, +}; + +static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { + .dpms = nv_crtc_dpms, + .prepare = nv_crtc_prepare, + .commit = nv_crtc_commit, + .mode_fixup = nv_crtc_mode_fixup, + .mode_set = nv_crtc_mode_set, + .mode_set_base = nv04_crtc_mode_set_base, + .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, + .load_lut = nv_crtc_gamma_load, +}; + +int +nv04_crtc_create(struct drm_device *dev, int crtc_num) +{ + struct nouveau_crtc *nv_crtc; + int ret, i; + + nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); + if (!nv_crtc) + return -ENOMEM; + + for (i = 0; i < 256; i++) { + nv_crtc->lut.r[i] = i << 8; + nv_crtc->lut.g[i] = i << 8; + nv_crtc->lut.b[i] = i << 8; + } + nv_crtc->lut.depth = 0; + + nv_crtc->index = crtc_num; + nv_crtc->last_dpms = NV_DPMS_CLEARED; + + drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs); + drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); + 
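+ /* 256-entry gamma ramp, plus a 64x64 cursor image kept in VRAM; the
+ * upload helpers above massage the pixel data into what the hw expects. */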
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); + + ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) { + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + nouveau_bo_unpin(nv_crtc->cursor.nvbo); + } + if (ret) + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + } + + nv04_cursor_init(nv_crtc); + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c new file mode 100644 index 0000000..a810303 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c @@ -0,0 +1,70 @@ +#include <drm/drmP.h> +#include <drm/drm_mode.h> +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_crtc.h" +#include "hw.h" + +static void +nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update) +{ + nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true); +} + +static void +nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) +{ + nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false); +} + +static void +nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ + nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; + NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, + NV_PRAMDAC_CU_START_POS, + XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | + XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X)); +} + +static void +crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index) +{ + NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index, + crtcstate->CRTC[index]); +} + +static void +nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + struct drm_crtc *crtc = &nv_crtc->base; + + regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] = + MASK(NV_CIO_CRE_HCUR_ASI) | + XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR); + regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] = + XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR); + if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN) + regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |= + MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL); + regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24; + + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); + crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); + if (nv_device(drm->device)->card_type == NV_40) + nv_fix_nv40_hw_cursor(dev, nv_crtc->index); +} + +int +nv04_cursor_init(struct nouveau_crtc *crtc) +{ + crtc->cursor.set_offset = nv04_cursor_set_offset; + crtc->cursor.set_pos = nv04_cursor_set_pos; + crtc->cursor.hide = nv04_cursor_hide; + crtc->cursor.show = nv04_cursor_show; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c new file mode 100644 index 0000000..434b920 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -0,0 +1,556 @@ +/* + * Copyright 2003 NVIDIA, Corporation + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * 
and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "hw.h" +#include "nvreg.h" + +#include <subdev/bios/gpio.h> +#include <subdev/gpio.h> +#include <subdev/timer.h> + +int nv04_dac_output_offset(struct drm_encoder *encoder) +{ + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + int offset = 0; + + if (dcb->or & (8 | DCB_OUTPUT_C)) + offset += 0x68; + if (dcb->or & (8 | DCB_OUTPUT_B)) + offset += 0x2000; + + return offset; +} + +/* + * arbitrary limit to number of sense oscillations tolerated in one sample + * period (observed to be at least 13 in "nvidia") + */ +#define MAX_HBLANK_OSC 20 + +/* + * arbitrary limit to number of conflicting sample pairs to tolerate at a + * voltage step (observed to be at least 5 in "nvidia") + */ +#define MAX_SAMPLE_PAIRS 10 + +static int sample_load_twice(struct drm_device *dev, bool sense[2]) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_timer *ptimer = nouveau_timer(device); + int i; + + for (i = 0; i < 2; i++) { + bool sense_a, sense_b, sense_b_prime; + int j = 0; + + /* + * wait for bit 0 clear -- out of hblank -- (say reg value 0x4), + * then wait for transition 0x4->0x5->0x4: enter hblank, leave + * hblank again + * use a 10ms timeout (guards against crtc being inactive, in + * which case blank state would never change) + */ + if (!nouveau_timer_wait_eq(ptimer, 10000000, + NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) + return -EBUSY; + if (!nouveau_timer_wait_eq(ptimer, 10000000, + NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000001)) + return -EBUSY; + if (!nouveau_timer_wait_eq(ptimer, 10000000, + NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) + return -EBUSY; + + udelay(100); + /* when level triggers, sense is _LO_ */ + sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; + + /* take another reading until it agrees with sense_a... */ + do { + udelay(100); + sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; + if (sense_a != sense_b) { + sense_b_prime = + nv_rd08(device, NV_PRMCIO_INP0) & 0x10; + if (sense_b == sense_b_prime) { + /* ... 
unless two consecutive subsequent + * samples agree; sense_a is replaced */ + sense_a = sense_b; + /* force mis-match so we loop */ + sense_b = !sense_a; + } + } + } while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC); + + if (j == MAX_HBLANK_OSC) + /* with so much oscillation, default to sense:LO */ + sense[i] = false; + else + sense[i] = sense_a; + } + + return 0; +} + +static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; + uint8_t saved_palette0[3], saved_palette_mask; + uint32_t saved_rtest_ctrl, saved_rgen_ctrl; + int i; + uint8_t blue; + bool sense = true; + + /* + * for this detection to work, there needs to be a mode set up on the + * CRTC. this is presumed to be the case + */ + + if (nv_two_heads(dev)) + /* only implemented for head A for now */ + NVSetOwner(dev, 0); + + saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX); + NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80); + + saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); + + saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, + saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); + + msleep(10); + + saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, + saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT))); + saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); + + nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); + for (i = 0; i < 3; i++) + saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA); + saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK); + nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0); + + saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, + (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | + NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) | + NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON); + + blue = 8; /* start of test range */ + + do { + bool sense_pair[2]; + + nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); + nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); + nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); + /* testing blue won't find monochrome monitors. I don't care */ + nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue); + + i = 0; + /* take sample pairs until both samples in the pair agree */ + do { + if (sample_load_twice(dev, sense_pair)) + goto out; + } while ((sense_pair[0] != sense_pair[1]) && + ++i < MAX_SAMPLE_PAIRS); + + if (i == MAX_SAMPLE_PAIRS) + /* too much oscillation defaults to LO */ + sense = false; + else + sense = sense_pair[0]; + + /* + * if sense goes LO before blue ramps to 0x18, monitor is not connected. 
+ * ergo, if blue gets to 0x18, monitor must be connected + */ + } while (++blue < 0x18 && sense); + +out: + nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); + nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); + for (i = 0; i < 3; i++) + nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); + NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); + + if (blue == 0x18) { + NV_DEBUG(drm, "Load detected on head A\n"); + return connector_status_connected; + } + + return connector_status_disconnected; +} + +uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_gpio *gpio = nouveau_gpio(device); + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); + uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, + saved_rtest_ctrl, saved_gpio0 = 0, saved_gpio1 = 0, temp, routput; + int head; + +#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) + if (dcb->type == DCB_OUTPUT_TV) { + testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); + + if (drm->vbios.tvdactestval) + testval = drm->vbios.tvdactestval; + } else { + testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ + + if (drm->vbios.dactestval) + testval = drm->vbios.dactestval; + } + + saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, + saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); + + saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2); + + nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); + if (regoffset == 0x68) { + saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4); + nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); + } + + if (gpio) { + saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff); + saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff); + gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV); + gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV); + } + + msleep(4); + + saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + head = (saved_routput & 0x100) >> 8; + + /* if there's a spare crtc, using it will minimise flicker */ + if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0)) + head ^= 1; + + /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ + routput = (saved_routput & 0xfffffece) | head << 8; + + if (nv_device(drm->device)->card_type >= NV_40) { + if (dcb->type == DCB_OUTPUT_TV) + routput |= 0x1a << 16; + else + routput &= ~(0x1a << 16); + } + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput); + msleep(1); + + temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, + NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval); + temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, + temp | 
NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); + msleep(5); + + sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + /* do it again just in case it's a residual current */ + sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + + temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, + temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0); + + /* bios does something more complex for restoring, but I think this is good enough */ + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); + if (regoffset == 0x68) + nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); + nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); + + if (gpio) { + gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1); + gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0); + } + + return sample; +} + +static enum drm_connector_status +nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + + if (nv04_dac_in_use(encoder)) + return connector_status_disconnected; + + if (nv17_dac_sample_load(encoder) & + NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { + NV_DEBUG(drm, "Load detected on output %c\n", + '@' + ffs(dcb->or)); + return connector_status_connected; + } else { + return connector_status_disconnected; + } +} + +static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + if (nv04_dac_in_use(encoder)) + return false; + + return true; +} + +static void nv04_dac_prepare(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct drm_device *dev = encoder->dev; + int head = nouveau_crtc(encoder->crtc)->index; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_disable(dev, head); +} + +static void nv04_dac_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + int head = nouveau_crtc(encoder->crtc)->index; + + if (nv_gf4_disp_arch(dev)) { + struct drm_encoder *rebind; + uint32_t dac_offset = nv04_dac_output_offset(encoder); + uint32_t otherdac; + + /* bit 16-19 are bits that are set on some G70 cards, + * but don't seem to have much effect */ + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset, + head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK); + /* force any other vga encoders to bind to the other crtc */ + list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) { + if (rebind == encoder + || nouveau_encoder(rebind)->dcb->type != DCB_OUTPUT_ANALOG) + continue; + + dac_offset = nv04_dac_output_offset(rebind); + otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset, + (otherdac & ~0x0100) | (head ^ 1) << 8); + } + } + + /* This could use refinement for flatpanels, but it should work this way */ + if (nv_device(drm->device)->chipset < 0x44) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); + else + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); +} + +static void nv04_dac_commit(struct drm_encoder 
*encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) +{ + struct drm_device *dev = encoder->dev; + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + + if (nv_gf4_disp_arch(dev)) { + uint32_t *dac_users = &nv04_display(dev)->dac_users[ffs(dcb->or) - 1]; + int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder); + uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off); + + if (enable) { + *dac_users |= 1 << dcb->index; + NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK); + + } else { + *dac_users &= ~(1 << dcb->index); + if (!*dac_users) + NVWriteRAMDAC(dev, 0, dacclk_off, + dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK); + } + } +} + +/* Check if the DAC corresponding to 'encoder' is being used by + * someone else. */ +bool nv04_dac_in_use(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + + return nv_gf4_disp_arch(encoder->dev) && + (nv04_display(dev)->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index)); +} + +static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + + if (nv_encoder->last_dpms == mode) + return; + nv_encoder->last_dpms = mode; + + NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); +} + +static void nv04_dac_save(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + + if (nv_gf4_disp_arch(dev)) + nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder)); +} + +static void nv04_dac_restore(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + + if (nv_gf4_disp_arch(dev)) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder), + nv_encoder->restore.output); + + nv_encoder->last_dpms = NV_DPMS_CLEARED; +} + +static void nv04_dac_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = { + .dpms = nv04_dac_dpms, + .save = nv04_dac_save, + .restore = nv04_dac_restore, + .mode_fixup = nv04_dac_mode_fixup, + .prepare = nv04_dac_prepare, + .commit = nv04_dac_commit, + .mode_set = nv04_dac_mode_set, + .detect = nv04_dac_detect +}; + +static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = { + .dpms = nv04_dac_dpms, + .save = nv04_dac_save, + .restore = nv04_dac_restore, + .mode_fixup = nv04_dac_mode_fixup, + .prepare = nv04_dac_prepare, + .commit = nv04_dac_commit, + .mode_set = nv04_dac_mode_set, + .detect = nv17_dac_detect +}; + +static const struct drm_encoder_funcs nv04_dac_funcs = { + 
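+ /* modeset handling lives in the helper funcs above */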
.destroy = nv04_dac_destroy, +}; + +int +nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry) +{ + const struct drm_encoder_helper_funcs *helper; + struct nouveau_encoder *nv_encoder = NULL; + struct drm_device *dev = connector->dev; + struct drm_encoder *encoder; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + if (nv_gf4_disp_arch(dev)) + helper = &nv17_dac_helper_funcs; + else + helper = &nv04_dac_helper_funcs; + + drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC); + drm_encoder_helper_add(encoder, helper); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c new file mode 100644 index 0000000..93dd23f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -0,0 +1,720 @@ +/* + * Copyright 2003 NVIDIA, Corporation + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "hw.h" +#include "nvreg.h" + +#include <drm/i2c/sil164.h> + +#include <subdev/i2c.h> + +#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \ + NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \ + NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS) +#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \ + NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \ + NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE) + +static inline bool is_fpc_off(uint32_t fpc) +{ + return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) == + FP_TG_CONTROL_OFF); +} + +int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent) +{ + /* special case of nv_read_tmds to find crtc associated with an output. 
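+ * (done by reading back TMDS register 0x4 through the control/data pair
+ * below, with writes disabled)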
+ * this does not give a correct answer for off-chip dvi, but there's no + * use for such an answer anyway + */ + int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2; + + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL, + NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4); + return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac; +} + +void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent, + int head, bool dl) +{ + /* The BIOS scripts don't do this for us, sadly + * Luckily we do know the values ;-) + * + * head < 0 indicates we wish to force a setting with the overrideval + * (for VT restore etc.) + */ + + int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2; + uint8_t tmds04 = 0x80; + + if (head != ramdac) + tmds04 = 0x88; + + if (dcbent->type == DCB_OUTPUT_LVDS) + tmds04 |= 0x01; + + nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04); + + if (dl) /* dual link */ + nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08); +} + +void nv04_dfp_disable(struct drm_device *dev, int head) +{ + struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg; + + if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & + FP_TG_CONTROL_ON) { + /* digital remnants must be cleaned before new crtc + * values programmed. delay is time for the vga stuff + * to realise it's in control again + */ + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, + FP_TG_CONTROL_OFF); + msleep(50); + } + /* don't inadvertently turn it on when state written later */ + crtcstate[head].fp_control = FP_TG_CONTROL_OFF; + crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &= + ~NV_CIO_CRE_LCD_ROUTE_MASK; +} + +void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc; + struct nouveau_crtc *nv_crtc; + uint32_t *fpc; + + if (mode == DRM_MODE_DPMS_ON) { + nv_crtc = nouveau_crtc(encoder->crtc); + fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control; + + if (is_fpc_off(*fpc)) { + /* using saved value is ok, as (is_digital && dpms_on && + * fp_control==OFF) is (at present) *only* true when + * fpc's most recent change was by below "off" code + */ + *fpc = nv_crtc->dpms_saved_fp_control; + } + + nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index; + NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc); + } else { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + nv_crtc = nouveau_crtc(crtc); + fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control; + + nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index); + if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) { + nv_crtc->dpms_saved_fp_control = *fpc; + /* cut the FP output */ + *fpc &= ~FP_TG_CONTROL_ON; + *fpc |= FP_TG_CONTROL_OFF; + NVWriteRAMDAC(dev, nv_crtc->index, + NV_PRAMDAC_FP_TG_CONTROL, *fpc); + } + } + } +} + +static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + struct drm_encoder *slave; + + if (dcb->type != DCB_OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP) + return NULL; + + /* Some BIOSes (e.g. the one in a Quadro FX1000) report several + * TMDS transmitters at the same I2C address, in the same I2C + * bus. This can still work because in that case one of them is + * always hard-wired to a reasonable configuration using straps, + * and the other one needs to be programmed. 
+ * + * I don't think there's a way to know which is which, even the + * blob programs the one exposed via I2C for *both* heads, so + * let's do the same. + */ + list_for_each_entry(slave, &dev->mode_config.encoder_list, head) { + struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb; + + if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) && + slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr) + return slave; + } + + return NULL; +} + +static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); + + if (!nv_connector->native_mode || + nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || + mode->hdisplay > nv_connector->native_mode->hdisplay || + mode->vdisplay > nv_connector->native_mode->vdisplay) { + nv_encoder->mode = *adjusted_mode; + + } else { + nv_encoder->mode = *nv_connector->native_mode; + adjusted_mode->clock = nv_connector->native_mode->clock; + } + + return true; +} + +static void nv04_dfp_prepare_sel_clk(struct drm_device *dev, + struct nouveau_encoder *nv_encoder, int head) +{ + struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; + uint32_t bits1618 = nv_encoder->dcb->or & DCB_OUTPUT_A ? 0x10000 : 0x40000; + + if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP) + return; + + /* SEL_CLK is only used on the primary ramdac + * It toggles spread spectrum PLL output and sets the bindings of PLLs + * to heads on digital outputs + */ + if (head) + state->sel_clk |= bits1618; + else + state->sel_clk &= ~bits1618; + + /* nv30: + * bit 0 NVClk spread spectrum on/off + * bit 2 MemClk spread spectrum on/off + * bit 4 PixClk1 spread spectrum on/off toggle + * bit 6 PixClk2 spread spectrum on/off toggle + * + * nv40 (observations from bios behaviour and mmio traces): + * bits 4&6 as for nv30 + * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6; + * maybe a different spread mode + * bits 8&10 seen on dual-link dvi outputs, purpose unknown (set by POST scripts) + * The logic behind turning spread spectrum on/off in the first place, + * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table + * entry has the necessary info) + */ + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && nv04_display(dev)->saved_reg.sel_clk & 0xf0) { + int shift = (nv04_display(dev)->saved_reg.sel_clk & 0x50) ? 0 : 1; + + state->sel_clk &= ~0xf0; + state->sel_clk |= (head ? 0x40 : 0x10) << shift; + } +} + +static void nv04_dfp_prepare(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct drm_device *dev = encoder->dev; + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg; + uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX]; + uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX]; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); + + *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3; + + if (nv_two_heads(dev)) { + if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) + *cr_lcd |= head ? 
0x0 : 0x8; + else { + *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) + *cr_lcd |= 0x30; + if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { + /* avoid being connected to both crtcs */ + *cr_lcd_oth &= ~0x30; + NVWriteVgaCrtc(dev, head ^ 1, + NV_CIO_CRE_LCD__INDEX, + *cr_lcd_oth); + } + } + } +} + + +static void nv04_dfp_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index]; + struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_display_mode *output_mode = &nv_encoder->mode; + struct drm_connector *connector = &nv_connector->base; + uint32_t mode_ratio, panel_ratio; + + NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index); + drm_mode_debug_printmodeline(output_mode); + + /* Initialize the FP registers in this CRTC. */ + regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1; + regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; + if (!nv_gf4_disp_arch(dev) || + (output_mode->hsync_start - output_mode->hdisplay) >= + drm->vbios.digital_min_front_porch) + regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; + else + regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - drm->vbios.digital_min_front_porch - 1; + regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; + regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; + regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; + regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1; + + regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1; + regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1; + regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1; + regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1; + regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1; + regp->fp_vert_regs[FP_VALID_START] = 0; + regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1; + + /* bit26: a bit seen on some g7x, no as yet discernable purpose */ + regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | + (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG)); + /* Deal with vsync/hsync polarity */ + /* LVDS screens do set this, but modes with +ve syncs are very rare */ + if (output_mode->flags & DRM_MODE_FLAG_PVSYNC) + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS; + if (output_mode->flags & DRM_MODE_FLAG_PHSYNC) + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS; + /* panel scaling first, as native would get set otherwise */ + if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || + nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */ + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER; + else if (adjusted_mode->hdisplay == output_mode->hdisplay && + adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */ + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; + else /* gpu needs to scale */ + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; + if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 
NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) + regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; + if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && + output_mode->clock > 165000) + regp->fp_control |= (2 << 24); + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) { + bool duallink = false, dummy; + if (nv_connector->edid && + nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { + duallink = (((u8 *)nv_connector->edid)[121] == 2); + } else { + nouveau_bios_parse_lvds_table(dev, output_mode->clock, + &duallink, &dummy); + } + + if (duallink) + regp->fp_control |= (8 << 28); + } else + if (output_mode->clock > 165000) + regp->fp_control |= (8 << 28); + + regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR | + NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR | + NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED | + NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE | + NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE; + + /* We want automatic scaling */ + regp->fp_debug_1 = 0; + /* This can override HTOTAL and VTOTAL */ + regp->fp_debug_2 = 0; + + /* Use 20.12 fixed point format to avoid floats */ + mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay; + panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay; + /* if ratios are equal, SCALE_ASPECT will automatically (and correctly) + * get treated the same as SCALE_FULLSCREEN */ + if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT && + mode_ratio != panel_ratio) { + uint32_t diff, scale; + bool divide_by_2 = nv_gf4_disp_arch(dev); + + if (mode_ratio < panel_ratio) { + /* vertical needs to expand to glass size (automatic) + * horizontal needs to be scaled at vertical scale factor + * to maintain aspect */ + + scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay; + regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE | + XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE); + + /* restrict area of screen used, horizontally */ + diff = output_mode->hdisplay - + output_mode->vdisplay * mode_ratio / (1 << 12); + regp->fp_horiz_regs[FP_VALID_START] += diff / 2; + regp->fp_horiz_regs[FP_VALID_END] -= diff / 2; + } + + if (mode_ratio > panel_ratio) { + /* horizontal needs to expand to glass size (automatic) + * vertical needs to be scaled at horizontal scale factor + * to maintain aspect */ + + scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay; + regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE | + XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE); + + /* restrict area of screen used, vertically */ + diff = output_mode->vdisplay - + (1 << 12) * output_mode->hdisplay / mode_ratio; + regp->fp_vert_regs[FP_VALID_START] += diff / 2; + regp->fp_vert_regs[FP_VALID_END] -= diff / 2; + } + } + + /* Output property. 
*/ + if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || + (nv_connector->dithering_mode == DITHERING_MODE_AUTO && + encoder->crtc->fb->depth > connector->display_info.bpc * 3)) { + if (nv_device(drm->device)->chipset == 0x11) + regp->dither = savep->dither | 0x00010000; + else { + int i; + regp->dither = savep->dither | 0x00000001; + for (i = 0; i < 3; i++) { + regp->dither_regs[i] = 0xe4e4e4e4; + regp->dither_regs[i + 3] = 0x44444444; + } + } + } else { + if (nv_device(drm->device)->chipset != 0x11) { + /* reset them */ + int i; + for (i = 0; i < 3; i++) { + regp->dither_regs[i] = savep->dither_regs[i]; + regp->dither_regs[i + 3] = savep->dither_regs[i + 3]; + } + } + regp->dither = savep->dither; + } + + regp->fp_margin_color = 0; +} + +static void nv04_dfp_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct dcb_output *dcbe = nv_encoder->dcb; + int head = nouveau_crtc(encoder->crtc)->index; + struct drm_encoder *slave_encoder; + + if (dcbe->type == DCB_OUTPUT_TMDS) + run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); + else if (dcbe->type == DCB_OUTPUT_LVDS) + call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock); + + /* update fp_control state for any changes made by scripts, + * so correct value is written at DPMS on */ + nv04_display(dev)->mode_reg.crtc_reg[head].fp_control = + NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); + + /* This could use refinement for flatpanels, but it should work this way */ + if (nv_device(drm->device)->chipset < 0x44) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); + else + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); + + /* Init external transmitters */ + slave_encoder = get_tmds_slave(encoder); + if (slave_encoder) + get_slave_funcs(slave_encoder)->mode_set( + slave_encoder, &nv_encoder->mode, &nv_encoder->mode); + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) +{ +#ifdef __powerpc__ + struct drm_device *dev = encoder->dev; + struct nouveau_device *device = nouveau_dev(dev); + + /* BIOS scripts usually take care of the backlight, thanks + * Apple for your consistency. 
+ */ + if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 || + dev->pci_device == 0x0189 || dev->pci_device == 0x0329) { + if (mode == DRM_MODE_DPMS_ON) { + nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); + nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); + } else { + nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0); + nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0); + } + } +#endif +} + +static inline bool is_powersaving_dpms(int mode) +{ + return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED; +} + +static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc = encoder->crtc; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms); + + if (nv_encoder->last_dpms == mode) + return; + nv_encoder->last_dpms = mode; + + NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + if (was_powersaving && is_powersaving_dpms(mode)) + return; + + if (nv_encoder->dcb->lvdsconf.use_power_scripts) { + /* when removing an output, crtc may not be set, but PANEL_OFF + * must still be run + */ + int head = crtc ? nouveau_crtc(crtc)->index : + nv04_dfp_get_bound_head(dev, nv_encoder->dcb); + + if (mode == DRM_MODE_DPMS_ON) { + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_ON, nv_encoder->mode.clock); + } else + /* pxclk of 0 is fine for PANEL_OFF, and for a + * disconnected LVDS encoder there is no native_mode + */ + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_OFF, 0); + } + + nv04_dfp_update_backlight(encoder, mode); + nv04_dfp_update_fp_control(encoder, mode); + + if (mode == DRM_MODE_DPMS_ON) + nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index); + else { + nv04_display(dev)->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); + nv04_display(dev)->mode_reg.sel_clk &= ~0xf0; + } + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); +} + +static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_drm *drm = nouveau_drm(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->last_dpms == mode) + return; + nv_encoder->last_dpms = mode; + + NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + nv04_dfp_update_backlight(encoder, mode); + nv04_dfp_update_fp_control(encoder, mode); +} + +static void nv04_dfp_save(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + + if (nv_two_heads(dev)) + nv_encoder->restore.head = + nv04_dfp_get_bound_head(dev, nv_encoder->dcb); +} + +static void nv04_dfp_restore(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + int head = nv_encoder->restore.head; + + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) { + struct nouveau_connector *connector = + nouveau_encoder_connector_get(nv_encoder); + + if (connector && connector->native_mode) + call_lvds_script(dev, nv_encoder->dcb, head, + LVDS_PANEL_ON, + connector->native_mode->clock); + + } else if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) { + int clock = nouveau_hw_pllvals_to_clk + (&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals); + + run_tmds_table(dev, nv_encoder->dcb, head, clock); + } + + 
nv_encoder->last_dpms = NV_DPMS_CLEARED; +} + +static void nv04_dfp_destroy(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (get_slave_funcs(encoder)) + get_slave_funcs(encoder)->destroy(encoder); + + drm_encoder_cleanup(encoder); + kfree(nv_encoder); +} + +static void nv04_tmds_slave_init(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c_port *port = i2c->find(i2c, 2); + struct i2c_board_info info[] = { + { + .type = "sil164", + .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38), + .platform_data = &(struct sil164_encoder_params) { + SIL164_INPUT_EDGE_RISING + } + }, + { } + }; + int type; + + if (!nv_gf4_disp_arch(dev) || !port || + get_tmds_slave(encoder)) + return; + + type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); + if (type < 0) + return; + + drm_i2c_encoder_init(dev, to_encoder_slave(encoder), + &port->adapter, &info[type]); +} + +static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { + .dpms = nv04_lvds_dpms, + .save = nv04_dfp_save, + .restore = nv04_dfp_restore, + .mode_fixup = nv04_dfp_mode_fixup, + .prepare = nv04_dfp_prepare, + .commit = nv04_dfp_commit, + .mode_set = nv04_dfp_mode_set, + .detect = NULL, +}; + +static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = { + .dpms = nv04_tmds_dpms, + .save = nv04_dfp_save, + .restore = nv04_dfp_restore, + .mode_fixup = nv04_dfp_mode_fixup, + .prepare = nv04_dfp_prepare, + .commit = nv04_dfp_commit, + .mode_set = nv04_dfp_mode_set, + .detect = NULL, +}; + +static const struct drm_encoder_funcs nv04_dfp_funcs = { + .destroy = nv04_dfp_destroy, +}; + +int +nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry) +{ + const struct drm_encoder_helper_funcs *helper; + struct nouveau_encoder *nv_encoder = NULL; + struct drm_encoder *encoder; + int type; + + switch (entry->type) { + case DCB_OUTPUT_TMDS: + type = DRM_MODE_ENCODER_TMDS; + helper = &nv04_tmds_helper_funcs; + break; + case DCB_OUTPUT_LVDS: + type = DRM_MODE_ENCODER_LVDS; + helper = &nv04_lvds_helper_funcs; + break; + default: + return -EINVAL; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + + encoder = to_drm_encoder(nv_encoder); + + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); + drm_encoder_helper_add(encoder, helper); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + if (entry->type == DCB_OUTPUT_TMDS && + entry->location != DCB_LOC_ON_CHIP) + nv04_tmds_slave_init(encoder); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c new file mode 100644 index 0000000..4908d3f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -0,0 +1,211 @@ +/* + * Copyright 2009 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Ben Skeggs + */ + +#include <core/object.h> +#include <core/class.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "hw.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" + +#include <subdev/i2c.h> + +int +nv04_display_early_init(struct drm_device *dev) +{ + /* ensure vblank interrupts are off, they can't be enabled until + * drm_vblank has been initialised + */ + NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0); + if (nv_two_heads(dev)) + NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); + + return 0; +} + +void +nv04_display_late_takedown(struct drm_device *dev) +{ +} + +int +nv04_display_create(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct dcb_table *dcb = &drm->vbios.dcb; + struct drm_connector *connector, *ct; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + struct nv04_display *disp; + int i, ret; + + disp = kzalloc(sizeof(*disp), GFP_KERNEL); + if (!disp) + return -ENOMEM; + + nouveau_display(dev)->priv = disp; + nouveau_display(dev)->dtor = nv04_display_destroy; + nouveau_display(dev)->init = nv04_display_init; + nouveau_display(dev)->fini = nv04_display_fini; + + nouveau_hw_save_vga_fonts(dev, 1); + + ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000, + NV04_DISP_CLASS, NULL, 0, &disp->core); + if (ret) + return ret; + + nv04_crtc_create(dev, 0); + if (nv_two_heads(dev)) + nv04_crtc_create(dev, 1); + + for (i = 0; i < dcb->entries; i++) { + struct dcb_output *dcbent = &dcb->entry[i]; + + connector = nouveau_connector_create(dev, dcbent->connector); + if (IS_ERR(connector)) + continue; + + switch (dcbent->type) { + case DCB_OUTPUT_ANALOG: + ret = nv04_dac_create(connector, dcbent); + break; + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_TMDS: + ret = nv04_dfp_create(connector, dcbent); + break; + case DCB_OUTPUT_TV: + if (dcbent->location == DCB_LOC_ON_CHIP) + ret = nv17_tv_create(connector, dcbent); + else + ret = nv04_tv_create(connector, dcbent); + break; + default: + NV_WARN(drm, "DCB type %d not known\n", dcbent->type); + continue; + } + + if (ret) + continue; + } + + list_for_each_entry_safe(connector, ct, + &dev->mode_config.connector_list, head) { + if (!connector->encoder_ids[0]) { + NV_WARN(drm, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + 
connector->funcs->destroy(connector); + } + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index); + } + + /* Save previous state */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + crtc->funcs->save(crtc); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct drm_encoder_helper_funcs *func = encoder->helper_private; + + func->save(encoder); + } + + return 0; +} + +void +nv04_display_destroy(struct drm_device *dev) +{ + struct nv04_display *disp = nv04_display(dev); + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + /* Turn every CRTC off. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_mode_set modeset = { + .crtc = crtc, + }; + + drm_mode_set_config_internal(&modeset); + } + + /* Restore state */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct drm_encoder_helper_funcs *func = encoder->helper_private; + + func->restore(encoder); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + crtc->funcs->restore(crtc); + + nouveau_hw_save_vga_fonts(dev, 0); + + nouveau_display(dev)->priv = NULL; + kfree(disp); +} + +int +nv04_display_init(struct drm_device *dev) +{ + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + /* meh.. modeset apparently doesn't setup all the regs and depends + * on pre-existing state, for now load the state of the card *before* + * nouveau was loaded, and then do a modeset. + * + * best thing to do probably is to make save/restore routines not + * save/restore "pre-load" state, but more general so we can save + * on suspend too. + */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct drm_encoder_helper_funcs *func = encoder->helper_private; + + func->restore(encoder); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + crtc->funcs->restore(crtc); + + return 0; +} + +void +nv04_display_fini(struct drm_device *dev) +{ + /* disable vblank interrupts */ + NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0); + if (nv_two_heads(dev)) + NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h new file mode 100644 index 0000000..a0a031d --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -0,0 +1,185 @@ +#ifndef __NV04_DISPLAY_H__ +#define __NV04_DISPLAY_H__ + +#include <subdev/bios/pll.h> + +#include "nouveau_display.h" + +enum nv04_fp_display_regs { + FP_DISPLAY_END, + FP_TOTAL, + FP_CRTC, + FP_SYNC_START, + FP_SYNC_END, + FP_VALID_START, + FP_VALID_END +}; + +struct nv04_crtc_reg { + unsigned char MiscOutReg; + uint8_t CRTC[0xa0]; + uint8_t CR58[0x10]; + uint8_t Sequencer[5]; + uint8_t Graphics[9]; + uint8_t Attribute[21]; + unsigned char DAC[768]; + + /* PCRTC regs */ + uint32_t fb_start; + uint32_t crtc_cfg; + uint32_t cursor_cfg; + uint32_t gpio_ext; + uint32_t crtc_830; + uint32_t crtc_834; + uint32_t crtc_850; + uint32_t crtc_eng_ctrl; + + /* PRAMDAC regs */ + uint32_t nv10_cursync; + struct nouveau_pll_vals pllvals; + uint32_t ramdac_gen_ctrl; + uint32_t ramdac_630; + uint32_t ramdac_634; + uint32_t tv_setup; + uint32_t tv_vtotal; + uint32_t tv_vskew; + uint32_t tv_vsync_delay; + uint32_t tv_htotal; + uint32_t tv_hskew; + uint32_t tv_hsync_delay; + uint32_t tv_hsync_delay2; + uint32_t fp_horiz_regs[7]; + uint32_t fp_vert_regs[7]; + uint32_t dither; + 
uint32_t fp_control; + uint32_t dither_regs[6]; + uint32_t fp_debug_0; + uint32_t fp_debug_1; + uint32_t fp_debug_2; + uint32_t fp_margin_color; + uint32_t ramdac_8c0; + uint32_t ramdac_a20; + uint32_t ramdac_a24; + uint32_t ramdac_a34; + uint32_t ctv_regs[38]; +}; + +struct nv04_output_reg { + uint32_t output; + int head; +}; + +struct nv04_mode_state { + struct nv04_crtc_reg crtc_reg[2]; + uint32_t pllsel; + uint32_t sel_clk; +}; + +struct nv04_display { + struct nv04_mode_state mode_reg; + struct nv04_mode_state saved_reg; + uint32_t saved_vga_font[4][16384]; + uint32_t dac_users[4]; + struct nouveau_object *core; +}; + +static inline struct nv04_display * +nv04_display(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +/* nv04_display.c */ +int nv04_display_early_init(struct drm_device *); +void nv04_display_late_takedown(struct drm_device *); +int nv04_display_create(struct drm_device *); +void nv04_display_destroy(struct drm_device *); +int nv04_display_init(struct drm_device *); +void nv04_display_fini(struct drm_device *); + +/* nv04_crtc.c */ +int nv04_crtc_create(struct drm_device *, int index); + +/* nv04_dac.c */ +int nv04_dac_create(struct drm_connector *, struct dcb_output *); +uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); +int nv04_dac_output_offset(struct drm_encoder *encoder); +void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); +bool nv04_dac_in_use(struct drm_encoder *encoder); + +/* nv04_dfp.c */ +int nv04_dfp_create(struct drm_connector *, struct dcb_output *); +int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent); +void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent, + int head, bool dl); +void nv04_dfp_disable(struct drm_device *dev, int head); +void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); + +/* nv04_tv.c */ +int nv04_tv_identify(struct drm_device *dev, int i2c_index); +int nv04_tv_create(struct drm_connector *, struct dcb_output *); + +/* nv17_tv.c */ +int nv17_tv_create(struct drm_connector *, struct dcb_output *); + +static inline bool +nv_two_heads(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + const int impl = dev->pci_device & 0x0ff0; + + if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && + impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) + return true; + + return false; +} + +static inline bool +nv_gf4_disp_arch(struct drm_device *dev) +{ + return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110; +} + +static inline bool +nv_two_reg_pll(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + const int impl = dev->pci_device & 0x0ff0; + + if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) + return true; + return false; +} + +static inline bool +nv_match_device(struct drm_device *dev, unsigned device, + unsigned sub_vendor, unsigned sub_device) +{ + return dev->pdev->device == device && + dev->pdev->subsystem_vendor == sub_vendor && + dev->pdev->subsystem_device == sub_device; +} + +#include <subdev/bios.h> +#include <subdev/bios/init.h> + +static inline void +nouveau_bios_run_init_table(struct drm_device *dev, u16 table, + struct dcb_output *outp, int crtc) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct nvbios_init init = { + .subdev = nv_subdev(bios), + .bios = bios, + .offset = table, + .outp = outp, + .crtc = crtc, + .execute = 1, + }; + + nvbios_exec(&init); 
+} + +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c new file mode 100644 index 0000000..973056b --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -0,0 +1,827 @@ +/* + * Copyright 2006 Dave Airlie + * Copyright 2007 Maarten Maathuis + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <drm/drmP.h> +#include "nouveau_drm.h" +#include "hw.h" + +#include <subdev/bios/pll.h> +#include <subdev/clock.h> +#include <subdev/timer.h> + +#define CHIPSET_NFORCE 0x01a0 +#define CHIPSET_NFORCE2 0x01f0 + +/* + * misc hw access wrappers/control functions + */ + +void +NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); + NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value); +} + +uint8_t +NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index); + return NVReadPRMVIO(dev, head, NV_PRMVIO_SR); +} + +void +NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); + NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value); +} + +uint8_t +NVReadVgaGr(struct drm_device *dev, int head, uint8_t index) +{ + NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index); + return NVReadPRMVIO(dev, head, NV_PRMVIO_GX); +} + +/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied) + * it affects only the 8 bit vga io regs, which we access using mmio at + * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d* + * in general, the set value of cr44 does not matter: reg access works as + * expected and values can be set for the appropriate head by using a 0x2000 + * offset as required + * however: + * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and + * cr44 must be set to 0 or 3 for accessing values on the correct head + * through the common 0xc03c* addresses + * b) in tied mode (4) head B is programmed to the values set on head A, and + * access using the head B addresses can have strange results, ergo we leave + * tied mode in init once we know to what cr44 should be restored on exit + * + * the owner parameter is slightly abused: + * 0 and 1 are treated as head values and so the set value is (owner * 3) + * other values are treated as literal values to set + */ +void +NVSetOwner(struct drm_device *dev, int owner) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (owner == 1) + owner *= 3; + + if 
(nv_device(drm->device)->chipset == 0x11) { + /* This might seem stupid, but the blob does it and + * omitting it often locks the system up. + */ + NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); + NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX); + } + + /* CR44 is always changed on CRTC0 */ + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); + + if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */ + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); + } +} + +void +NVBlankScreen(struct drm_device *dev, int head, bool blank) +{ + unsigned char seq1; + + if (nv_two_heads(dev)) + NVSetOwner(dev, head); + + seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); + + NVVgaSeqReset(dev, head, true); + if (blank) + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); + else + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); + NVVgaSeqReset(dev, head, false); +} + +/* + * PLL getting + */ + +static void +nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, + uint32_t pll2, struct nouveau_pll_vals *pllvals) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */ + + /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */ + pllvals->log2P = (pll1 >> 16) & 0x7; + pllvals->N2 = pllvals->M2 = 1; + + if (reg1 <= 0x405c) { + pllvals->NM1 = pll2 & 0xffff; + /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */ + if (!(pll1 & 0x1100)) + pllvals->NM2 = pll2 >> 16; + } else { + pllvals->NM1 = pll1 & 0xffff; + if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) + pllvals->NM2 = pll2 & 0xffff; + else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) { + pllvals->M1 &= 0xf; /* only 4 bits */ + if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { + pllvals->M2 = (pll1 >> 4) & 0x7; + pllvals->N2 = ((pll1 >> 21) & 0x18) | + ((pll1 >> 19) & 0x7); + } + } + } +} + +int +nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, + struct nouveau_pll_vals *pllvals) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_bios *bios = nouveau_bios(device); + uint32_t reg1, pll1, pll2 = 0; + struct nvbios_pll pll_lim; + int ret; + + ret = nvbios_pll_parse(bios, plltype, &pll_lim); + if (ret || !(reg1 = pll_lim.reg)) + return -ENOENT; + + pll1 = nv_rd32(device, reg1); + if (reg1 <= 0x405c) + pll2 = nv_rd32(device, reg1 + 4); + else if (nv_two_reg_pll(dev)) { + uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 
0x5c : 0x70); + + pll2 = nv_rd32(device, reg2); + } + + if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { + uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); + + /* check whether vpll has been forced into single stage mode */ + if (reg1 == NV_PRAMDAC_VPLL_COEFF) { + if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE) + pll2 = 0; + } else + if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE) + pll2 = 0; + } + + nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals); + pllvals->refclk = pll_lim.refclk; + return 0; +} + +int +nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv) +{ + /* Avoid divide by zero if called at an inappropriate time */ + if (!pv->M1 || !pv->M2) + return 0; + + return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P; +} + +int +nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) +{ + struct nouveau_pll_vals pllvals; + int ret; + + if (plltype == PLL_MEMORY && + (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { + uint32_t mpllP; + + pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + if (!mpllP) + mpllP = 4; + + return 400000 / mpllP; + } else + if (plltype == PLL_MEMORY && + (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { + uint32_t clock; + + pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); + return clock; + } + + ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); + if (ret) + return ret; + + return nouveau_hw_pllvals_to_clk(&pllvals); +} + +static void +nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) +{ + /* the vpll on an unused head can come up with a random value, way + * beyond the pll limits. for some reason this causes the chip to + * lock up when reading the dac palette regs, so set a valid pll here + * when such a condition detected. only seen on nv11 to date + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_clock *clk = nouveau_clock(device); + struct nouveau_bios *bios = nouveau_bios(device); + struct nvbios_pll pll_lim; + struct nouveau_pll_vals pv; + enum nvbios_pll_type pll = head ? 
PLL_VPLL1 : PLL_VPLL0; + + if (nvbios_pll_parse(bios, pll, &pll_lim)) + return; + nouveau_hw_get_pllvals(dev, pll, &pv); + + if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && + pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && + pv.log2P <= pll_lim.max_p) + return; + + NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1); + + /* set lowest clock within static limits */ + pv.M1 = pll_lim.vco1.max_m; + pv.N1 = pll_lim.vco1.min_n; + pv.log2P = pll_lim.max_p_usable; + clk->pll_prog(clk, pll_lim.reg, &pv); +} + +/* + * vga font save/restore + */ + +static void nouveau_vga_font_io(struct drm_device *dev, + void __iomem *iovram, + bool save, unsigned plane) +{ + unsigned i; + + NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane); + NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane); + for (i = 0; i < 16384; i++) { + if (save) { + nv04_display(dev)->saved_vga_font[plane][i] = + ioread32_native(iovram + i * 4); + } else { + iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i], + iovram + i * 4); + } + } +} + +void +nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t misc, gr4, gr5, gr6, seq2, seq4; + bool graphicsmode; + unsigned plane; + void __iomem *iovram; + + if (nv_two_heads(dev)) + NVSetOwner(dev, 0); + + NVSetEnablePalette(dev, 0, true); + graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1; + NVSetEnablePalette(dev, 0, false); + + if (graphicsmode) /* graphics mode => framebuffer => no need to save */ + return; + + NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor"); + + /* map first 64KiB of VRAM, holds VGA fonts etc */ + iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536); + if (!iovram) { + NV_ERROR(drm, "Failed to map VRAM, " + "cannot save/restore VGA fonts.\n"); + return; + } + + if (nv_two_heads(dev)) + NVBlankScreen(dev, 1, true); + NVBlankScreen(dev, 0, true); + + /* save control regs */ + misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ); + seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX); + seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX); + gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX); + gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX); + gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX); + + NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5); + + /* store font in planes 0..3 */ + for (plane = 0; plane < 4; plane++) + nouveau_vga_font_io(dev, iovram, save, plane); + + /* restore control regs */ + NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc); + NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5); + NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2); + NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4); + + if (nv_two_heads(dev)) + NVBlankScreen(dev, 1, false); + NVBlankScreen(dev, 0, false); + + iounmap(iovram); +} + +/* + * mode state save/load + */ + +static void +rd_cio_state(struct drm_device *dev, int head, + struct nv04_crtc_reg *crtcstate, int index) +{ + crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index); +} + +static void +wr_cio_state(struct drm_device *dev, int head, + struct nv04_crtc_reg *crtcstate, int index) +{ + NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]); +} + +static void +nv_save_state_ramdac(struct 
drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + if (nv_device(drm->device)->card_type >= NV_10) + regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); + + nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); + state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); + if (nv_two_heads(dev)) + state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); + if (nv_device(drm->device)->chipset == 0x11) + regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); + + regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); + + if (nv_gf4_disp_arch(dev)) + regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); + if (nv_device(drm->device)->chipset >= 0x30) + regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); + + regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); + regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL); + regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW); + regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY); + regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL); + regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW); + regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY); + regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2); + + for (i = 0; i < 7; i++) { + uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4); + regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg); + regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20); + } + + if (nv_gf4_disp_arch(dev)) { + regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER); + for (i = 0; i < 3; i++) { + regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4); + regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4); + } + } + + regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); + regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0); + if (!nv_gf4_disp_arch(dev) && head == 0) { + /* early chips don't allow access to PRAMDAC_TMDS_* without + * the head A FPCLK on (nv11 even locks up) */ + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 & + ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK); + } + regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1); + regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2); + + regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR); + + if (nv_gf4_disp_arch(dev)) + regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); + + if (nv_device(drm->device)->card_type == NV_40) { + regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); + regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); + regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); + + for (i = 0; i < 38; i++) + regp->ctv_regs[i] = NVReadRAMDAC(dev, head, + NV_PRAMDAC_CTV + 4*i); + } +} + +static void +nv_load_state_ramdac(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_clock *clk = nouveau_clock(drm->device); + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + uint32_t pllreg = head ? 
NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; + int i; + + if (nv_device(drm->device)->card_type >= NV_10) + NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); + + clk->pll_prog(clk, pllreg, ®p->pllvals); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); + if (nv_two_heads(dev)) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); + if (nv_device(drm->device)->chipset == 0x11) + NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); + + if (nv_gf4_disp_arch(dev)) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); + if (nv_device(drm->device)->chipset >= 0x30) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2); + + for (i = 0; i < 7; i++) { + uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4); + + NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]); + NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]); + } + + if (nv_gf4_disp_arch(dev)) { + NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither); + for (i = 0; i < 3; i++) { + NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]); + } + } + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color); + + if (nv_gf4_disp_arch(dev)) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); + + if (nv_device(drm->device)->card_type == NV_40) { + NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); + + for (i = 0; i < 38; i++) + NVWriteRAMDAC(dev, head, + NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]); + } +} + +static void +nv_save_state_vga(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ); + + for (i = 0; i < 25; i++) + rd_cio_state(dev, head, regp, i); + + NVSetEnablePalette(dev, head, true); + for (i = 0; i < 21; i++) + regp->Attribute[i] = NVReadVgaAttr(dev, head, i); + NVSetEnablePalette(dev, head, false); + + for (i = 0; i < 9; i++) + regp->Graphics[i] = NVReadVgaGr(dev, head, i); + + for (i = 0; i < 5; i++) + regp->Sequencer[i] = NVReadVgaSeq(dev, head, i); +} + +static void +nv_load_state_vga(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg); + + for (i = 0; i < 5; i++) + 
NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]); + + nv_lock_vga_crtc_base(dev, head, false); + for (i = 0; i < 25; i++) + wr_cio_state(dev, head, regp, i); + nv_lock_vga_crtc_base(dev, head, true); + + for (i = 0; i < 9; i++) + NVWriteVgaGr(dev, head, i, regp->Graphics[i]); + + NVSetEnablePalette(dev, head, true); + for (i = 0; i < 21; i++) + NVWriteVgaAttr(dev, head, i, regp->Attribute[i]); + NVSetEnablePalette(dev, head, false); +} + +static void +nv_save_state_ext(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + int i; + + rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_21); + + if (nv_device(drm->device)->card_type >= NV_20) + rd_cio_state(dev, head, regp, NV_CIO_CRE_47); + + if (nv_device(drm->device)->card_type >= NV_30) + rd_cio_state(dev, head, regp, 0x9f); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_49); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); + + if (nv_device(drm->device)->card_type >= NV_10) { + regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); + regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); + + if (nv_device(drm->device)->card_type >= NV_30) + regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); + + if (nv_device(drm->device)->card_type == NV_40) + regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); + + if (nv_two_heads(dev)) + regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL); + regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG); + } + + regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); + if (nv_device(drm->device)->card_type >= NV_10) { + rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); + rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); + rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); + rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); + } + /* NV11 and NV20 don't have this, they stop at 0x52. 
*/ + if (nv_gf4_disp_arch(dev)) { + rd_cio_state(dev, head, regp, NV_CIO_CRE_42); + rd_cio_state(dev, head, regp, NV_CIO_CRE_53); + rd_cio_state(dev, head, regp, NV_CIO_CRE_54); + + for (i = 0; i < 0x10; i++) + regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i); + rd_cio_state(dev, head, regp, NV_CIO_CRE_59); + rd_cio_state(dev, head, regp, NV_CIO_CRE_5B); + + rd_cio_state(dev, head, regp, NV_CIO_CRE_85); + rd_cio_state(dev, head, regp, NV_CIO_CRE_86); + } + + regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START); +} + +static void +nv_load_state_ext(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_timer *ptimer = nouveau_timer(device); + struct nv04_crtc_reg *regp = &state->crtc_reg[head]; + uint32_t reg900; + int i; + + if (nv_device(drm->device)->card_type >= NV_10) { + if (nv_two_heads(dev)) + /* setting ENGINE_CTRL (EC) *must* come before + * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in + * EC that should not be overwritten by writing stale EC + */ + NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); + + nv_wr32(device, NV_PVIDEO_STOP, 1); + nv_wr32(device, NV_PVIDEO_INTR_EN, 0); + nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); + nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); + nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1); + nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1); + nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1); + nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1); + nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); + + NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); + NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); + NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); + + if (nv_device(drm->device)->card_type >= NV_30) + NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); + + if (nv_device(drm->device)->card_type == NV_40) { + NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); + + reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); + if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); + else + NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); + } + } + + NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); + + if (nv_device(drm->device)->card_type >= NV_20) + wr_cio_state(dev, head, regp, NV_CIO_CRE_47); + + if (nv_device(drm->device)->card_type >= NV_30) + wr_cio_state(dev, head, regp, 0x9f); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_49); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); + if (nv_device(drm->device)->card_type == NV_40) + nv_fix_nv40_hw_cursor(dev, head); + wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); + + wr_cio_state(dev, head, regp, 
NV_CIO_CRE_SCRATCH3__INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); + if (nv_device(drm->device)->card_type >= NV_10) { + wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); + wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); + wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); + wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); + } + /* NV11 and NV20 stop at 0x52. */ + if (nv_gf4_disp_arch(dev)) { + if (nv_device(drm->device)->card_type == NV_10) { + /* Not waiting for vertical retrace before modifying + CRE_53/CRE_54 causes lockups. */ + nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); + nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); + } + + wr_cio_state(dev, head, regp, NV_CIO_CRE_42); + wr_cio_state(dev, head, regp, NV_CIO_CRE_53); + wr_cio_state(dev, head, regp, NV_CIO_CRE_54); + + for (i = 0; i < 0x10; i++) + NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]); + wr_cio_state(dev, head, regp, NV_CIO_CRE_59); + wr_cio_state(dev, head, regp, NV_CIO_CRE_5B); + + wr_cio_state(dev, head, regp, NV_CIO_CRE_85); + wr_cio_state(dev, head, regp, NV_CIO_CRE_86); + } + + NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); +} + +static void +nv_save_state_palette(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_device *device = nouveau_dev(dev); + int head_offset = head * NV_PRMDIO_SIZE, i; + + nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, + NV_PRMDIO_PIXEL_MASK_MASK); + nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); + + for (i = 0; i < 768; i++) { + state->crtc_reg[head].DAC[i] = nv_rd08(device, + NV_PRMDIO_PALETTE_DATA + head_offset); + } + + NVSetEnablePalette(dev, head, false); +} + +void +nouveau_hw_load_state_palette(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_device *device = nouveau_dev(dev); + int head_offset = head * NV_PRMDIO_SIZE, i; + + nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, + NV_PRMDIO_PIXEL_MASK_MASK); + nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); + + for (i = 0; i < 768; i++) { + nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, + state->crtc_reg[head].DAC[i]); + } + + NVSetEnablePalette(dev, head, false); +} + +void nouveau_hw_save_state(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (nv_device(drm->device)->chipset == 0x11) + /* NB: no attempt is made to restore the bad pll later on */ + nouveau_hw_fix_bad_vpll(dev, head); + nv_save_state_ramdac(dev, head, state); + nv_save_state_vga(dev, head, state); + nv_save_state_palette(dev, head, state); + nv_save_state_ext(dev, head, state); +} + +void nouveau_hw_load_state(struct drm_device *dev, int head, + struct nv04_mode_state *state) +{ + NVVgaProtect(dev, head, true); + nv_load_state_ramdac(dev, head, state); + nv_load_state_ext(dev, head, state); + nouveau_hw_load_state_palette(dev, head, state); + nv_load_state_vga(dev, head, state); + NVVgaProtect(dev, head, false); +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h new file mode 100644 index 0000000..eeb70d9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h @@ -0,0 +1,409 @@ +/* + * Copyright 2008 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without 
limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __NOUVEAU_HW_H__ +#define __NOUVEAU_HW_H__ + +#include <drm/drmP.h> +#include "disp.h" +#include "nvreg.h" + +#include <subdev/bios/pll.h> + +#define MASK(field) ( \ + (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field)) + +#define XLATE(src, srclowbit, outfield) ( \ + (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield)) + +void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value); +uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index); +void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value); +uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); +void NVSetOwner(struct drm_device *, int owner); +void NVBlankScreen(struct drm_device *, int head, bool blank); +int nouveau_hw_get_pllvals(struct drm_device *, enum nvbios_pll_type plltype, + struct nouveau_pll_vals *pllvals); +int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals); +int nouveau_hw_get_clock(struct drm_device *, enum nvbios_pll_type plltype); +void nouveau_hw_save_vga_fonts(struct drm_device *, bool save); +void nouveau_hw_save_state(struct drm_device *, int head, + struct nv04_mode_state *state); +void nouveau_hw_load_state(struct drm_device *, int head, + struct nv04_mode_state *state); +void nouveau_hw_load_state_palette(struct drm_device *, int head, + struct nv04_mode_state *state); + +/* nouveau_calc.c */ +extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, + int *burst, int *lwm); + +static inline uint32_t NVReadCRTC(struct drm_device *dev, + int head, uint32_t reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + uint32_t val; + if (head) + reg += NV_PCRTC0_SIZE; + val = nv_rd32(device, reg); + return val; +} + +static inline void NVWriteCRTC(struct drm_device *dev, + int head, uint32_t reg, uint32_t val) +{ + struct nouveau_device *device = nouveau_dev(dev); + if (head) + reg += NV_PCRTC0_SIZE; + nv_wr32(device, reg, val); +} + +static inline uint32_t NVReadRAMDAC(struct drm_device *dev, + int head, uint32_t reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + uint32_t val; + if (head) + reg += NV_PRAMDAC0_SIZE; + val = nv_rd32(device, reg); + return val; +} + +static inline void NVWriteRAMDAC(struct drm_device *dev, + int head, uint32_t reg, uint32_t val) +{ + struct nouveau_device *device = nouveau_dev(dev); + if (head) + reg += NV_PRAMDAC0_SIZE; + nv_wr32(device, reg, val); +} + +static inline uint8_t nv_read_tmds(struct drm_device *dev, + int or, int dl, uint8_t address) +{ + int ramdac = (or & DCB_OUTPUT_C) >> 2; + + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, + NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address); 
+ return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8); +} + +static inline void nv_write_tmds(struct drm_device *dev, + int or, int dl, uint8_t address, + uint8_t data) +{ + int ramdac = (or & DCB_OUTPUT_C) >> 2; + + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data); + NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address); +} + +static inline void NVWriteVgaCrtc(struct drm_device *dev, + int head, uint8_t index, uint8_t value) +{ + struct nouveau_device *device = nouveau_dev(dev); + nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); + nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); +} + +static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, + int head, uint8_t index) +{ + struct nouveau_device *device = nouveau_dev(dev); + uint8_t val; + nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); + val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); + return val; +} + +/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for CR58 + * I suspect they in fact do nothing, but are merely a way to carry useful + * per-head variables around + * + * Known uses: + * CR57 CR58 + * 0x00 index to the appropriate dcb entry (or 7f for inactive) + * 0x02 dcb entry's "or" value (or 00 for inactive) + * 0x03 bit0 set for dual link (LVDS, possibly elsewhere too) + * 0x08 or 0x09 pxclk in MHz + * 0x0f laptop panel info - low nibble for PEXTDEV_BOOT_0 strap + * high nibble for xlat strap value + */ + +static inline void +NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value) +{ + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value); +} + +static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index) +{ + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); + return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58); +} + +static inline uint8_t NVReadPRMVIO(struct drm_device *dev, + int head, uint32_t reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t val; + + /* Only NV4x have two pvio ranges; other twoHeads cards MUST call + * NVSetOwner for the relevant head to be programmed */ + if (head && nv_device(drm->device)->card_type == NV_40) + reg += NV_PRMVIO_SIZE; + + val = nv_rd08(device, reg); + return val; +} + +static inline void NVWritePRMVIO(struct drm_device *dev, + int head, uint32_t reg, uint8_t value) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + + /* Only NV4x have two pvio ranges; other twoHeads cards MUST call + * NVSetOwner for the relevant head to be programmed */ + if (head && nv_device(drm->device)->card_type == NV_40) + reg += NV_PRMVIO_SIZE; + + nv_wr08(device, reg, value); +} + +static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) +{ + struct nouveau_device *device = nouveau_dev(dev); + nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 
0 : 0x20); +} + +static inline bool NVGetEnablePalette(struct drm_device *dev, int head) +{ + struct nouveau_device *device = nouveau_dev(dev); + nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); +} + +static inline void NVWriteVgaAttr(struct drm_device *dev, + int head, uint8_t index, uint8_t value) +{ + struct nouveau_device *device = nouveau_dev(dev); + if (NVGetEnablePalette(dev, head)) + index &= ~0x20; + else + index |= 0x20; + + nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); + nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); +} + +static inline uint8_t NVReadVgaAttr(struct drm_device *dev, + int head, uint8_t index) +{ + struct nouveau_device *device = nouveau_dev(dev); + uint8_t val; + if (NVGetEnablePalette(dev, head)) + index &= ~0x20; + else + index |= 0x20; + + nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); + val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); + return val; +} + +static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start) +{ + NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3); +} + +static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) +{ + uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); + + if (protect) { + NVVgaSeqReset(dev, head, true); + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); + } else { + /* Reenable sequencer, then turn on screen */ + NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */ + NVVgaSeqReset(dev, head, false); + } + NVSetEnablePalette(dev, head, protect); +} + +static inline bool +nv_heads_tied(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + + if (nv_device(drm->device)->chipset == 0x11) + return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); + + return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; +} + +/* makes cr0-7 on the specified head read-only */ +static inline bool +nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock) +{ + uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX); + bool waslocked = cr11 & 0x80; + + if (lock) + cr11 |= 0x80; + else + cr11 &= ~0x80; + NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11); + + return waslocked; +} + +static inline void +nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock) +{ + /* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs + * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB + * bit6: seems to have some effect on CR09 (double scan, VBS_9) + * bit5: unlocks HDE + * bit4: unlocks VDE + * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR + * bit2: same as bit 1 of 0x60?804 + * bit0: same as bit 0 of 0x60?804 + */ + + uint8_t cr21 = lock; + + if (lock < 0) + /* 0xfa is generic "unlock all" mask */ + cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa; + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21); +} + +/* renders the extended crtc regs (cr19+) on all crtcs impervious: + * immutable and unreadable + */ +static inline bool +NVLockVgaCrtcs(struct drm_device *dev, bool lock) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); + + NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, + lock ? 
NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); + /* NV11 has independently lockable extended crtcs, except when tied */ + if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev)) + NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, + lock ? NV_CIO_SR_LOCK_VALUE : + NV_CIO_SR_UNLOCK_RW_VALUE); + + return waslocked; +} + +/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */ +#define NV04_CURSOR_SIZE 32 +/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */ +#define NV10_CURSOR_SIZE 64 + +static inline int nv_cursor_width(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; +} + +static inline void +nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) +{ + /* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40, + * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS + * for changes to the CRTC CURCTL regs to take effect, whether changing + * the pixmap location, or just showing/hiding the cursor + */ + uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos); +} + +static inline void +nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + NVWriteCRTC(dev, head, NV_PCRTC_START, offset); + + if (nv_device(drm->device)->card_type == NV_04) { + /* + * Hilarious, the 24th bit doesn't want to stick to + * PCRTC_START... + */ + int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, + (cre_heb & ~0x40) | ((offset >> 18) & 0x40)); + } +} + +static inline void +nv_show_cursor(struct drm_device *dev, int head, bool show) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t *curctl1 = + &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; + + if (show) + *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); + else + *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); + + if (nv_device(drm->device)->card_type == NV_40) + nv_fix_nv40_hw_cursor(dev, head); +} + +static inline uint32_t +nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + int mask; + + if (bpp == 15) + bpp = 16; + if (bpp == 24) + bpp = 8; + + /* Alignment requirements taken from the Haiku driver */ + if (nv_device(drm->device)->card_type == NV_04) + mask = 128 / bpp - 1; + else + mask = 512 / bpp - 1; + + return (width + mask) & ~mask; +} + +#endif /* __NOUVEAU_HW_H__ */ diff --git a/drivers/gpu/drm/nouveau/dispnv04/nvreg.h b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h new file mode 100644 index 0000000..bbfb1a6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/nvreg.h @@ -0,0 +1,517 @@ +/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */ +/* + * Copyright 1996-1997 David J. 
McKay + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */ + +#ifndef __NVREG_H_ +#define __NVREG_H_ + +#define NV_PMC_OFFSET 0x00000000 +#define NV_PMC_SIZE 0x00001000 + +#define NV_PBUS_OFFSET 0x00001000 +#define NV_PBUS_SIZE 0x00001000 + +#define NV_PFIFO_OFFSET 0x00002000 +#define NV_PFIFO_SIZE 0x00002000 + +#define NV_HDIAG_OFFSET 0x00005000 +#define NV_HDIAG_SIZE 0x00001000 + +#define NV_PRAM_OFFSET 0x00006000 +#define NV_PRAM_SIZE 0x00001000 + +#define NV_PVIDEO_OFFSET 0x00008000 +#define NV_PVIDEO_SIZE 0x00001000 + +#define NV_PTIMER_OFFSET 0x00009000 +#define NV_PTIMER_SIZE 0x00001000 + +#define NV_PPM_OFFSET 0x0000A000 +#define NV_PPM_SIZE 0x00001000 + +#define NV_PTV_OFFSET 0x0000D000 +#define NV_PTV_SIZE 0x00001000 + +#define NV_PRMVGA_OFFSET 0x000A0000 +#define NV_PRMVGA_SIZE 0x00020000 + +#define NV_PRMVIO0_OFFSET 0x000C0000 +#define NV_PRMVIO_SIZE 0x00002000 +#define NV_PRMVIO1_OFFSET 0x000C2000 + +#define NV_PFB_OFFSET 0x00100000 +#define NV_PFB_SIZE 0x00001000 + +#define NV_PEXTDEV_OFFSET 0x00101000 +#define NV_PEXTDEV_SIZE 0x00001000 + +#define NV_PME_OFFSET 0x00200000 +#define NV_PME_SIZE 0x00001000 + +#define NV_PROM_OFFSET 0x00300000 +#define NV_PROM_SIZE 0x00010000 + +#define NV_PGRAPH_OFFSET 0x00400000 +#define NV_PGRAPH_SIZE 0x00010000 + +#define NV_PCRTC0_OFFSET 0x00600000 +#define NV_PCRTC0_SIZE 0x00002000 /* empirical */ + +#define NV_PRMCIO0_OFFSET 0x00601000 +#define NV_PRMCIO_SIZE 0x00002000 +#define NV_PRMCIO1_OFFSET 0x00603000 + +#define NV50_DISPLAY_OFFSET 0x00610000 +#define NV50_DISPLAY_SIZE 0x0000FFFF + +#define NV_PRAMDAC0_OFFSET 0x00680000 +#define NV_PRAMDAC0_SIZE 0x00002000 + +#define NV_PRMDIO0_OFFSET 0x00681000 +#define NV_PRMDIO_SIZE 0x00002000 +#define NV_PRMDIO1_OFFSET 0x00683000 + +#define NV_PRAMIN_OFFSET 0x00700000 +#define NV_PRAMIN_SIZE 0x00100000 + +#define NV_FIFO_OFFSET 0x00800000 +#define NV_FIFO_SIZE 0x00800000 + +#define NV_PMC_BOOT_0 0x00000000 +#define NV_PMC_ENABLE 0x00000200 + +#define NV_VIO_VSE2 0x000003c3 +#define NV_VIO_SRX 0x000003c4 + +#define NV_CIO_CRX__COLOR 0x000003d4 +#define NV_CIO_CR__COLOR 0x000003d5 + +#define NV_PBUS_DEBUG_1 0x00001084 +#define NV_PBUS_DEBUG_4 0x00001098 +#define NV_PBUS_DEBUG_DUALHEAD_CTL 0x000010f0 +#define NV_PBUS_POWERCTRL_1 0x00001584 +#define NV_PBUS_POWERCTRL_2 0x00001588 +#define NV_PBUS_POWERCTRL_4 0x00001590 +#define NV_PBUS_PCI_NV_19 0x0000184C +#define NV_PBUS_PCI_NV_20 
0x00001850 +# define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0) +# define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0) + +#define NV_PFIFO_RAMHT 0x00002210 + +#define NV_PTV_TV_INDEX 0x0000d220 +#define NV_PTV_TV_DATA 0x0000d224 +#define NV_PTV_HFILTER 0x0000d310 +#define NV_PTV_HFILTER2 0x0000d390 +#define NV_PTV_VFILTER 0x0000d510 + +#define NV_PRMVIO_MISC__WRITE 0x000c03c2 +#define NV_PRMVIO_SRX 0x000c03c4 +#define NV_PRMVIO_SR 0x000c03c5 +# define NV_VIO_SR_RESET_INDEX 0x00 +# define NV_VIO_SR_CLOCK_INDEX 0x01 +# define NV_VIO_SR_PLANE_MASK_INDEX 0x02 +# define NV_VIO_SR_CHAR_MAP_INDEX 0x03 +# define NV_VIO_SR_MEM_MODE_INDEX 0x04 +#define NV_PRMVIO_MISC__READ 0x000c03cc +#define NV_PRMVIO_GRX 0x000c03ce +#define NV_PRMVIO_GX 0x000c03cf +# define NV_VIO_GX_SR_INDEX 0x00 +# define NV_VIO_GX_SREN_INDEX 0x01 +# define NV_VIO_GX_CCOMP_INDEX 0x02 +# define NV_VIO_GX_ROP_INDEX 0x03 +# define NV_VIO_GX_READ_MAP_INDEX 0x04 +# define NV_VIO_GX_MODE_INDEX 0x05 +# define NV_VIO_GX_MISC_INDEX 0x06 +# define NV_VIO_GX_DONT_CARE_INDEX 0x07 +# define NV_VIO_GX_BIT_MASK_INDEX 0x08 + +#define NV_PCRTC_INTR_0 0x00600100 +# define NV_PCRTC_INTR_0_VBLANK (1 << 0) +#define NV_PCRTC_INTR_EN_0 0x00600140 +#define NV_PCRTC_START 0x00600800 +#define NV_PCRTC_CONFIG 0x00600804 +# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) +# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0) +# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) +#define NV_PCRTC_CURSOR_CONFIG 0x00600810 +# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) +# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) +# define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM (1 << 8) +# define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32 (1 << 12) +# define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 (1 << 16) +# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32 (2 << 24) +# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 (4 << 24) +# define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA (1 << 28) + +/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */ +#define NV_PCRTC_GPIO 0x00600818 +#define NV_PCRTC_GPIO_EXT 0x0060081c +#define NV_PCRTC_830 0x00600830 +#define NV_PCRTC_834 0x00600834 +#define NV_PCRTC_850 0x00600850 +#define NV_PCRTC_ENGINE_CTRL 0x00600860 +# define NV_CRTC_FSEL_I2C (1 << 4) +# define NV_CRTC_FSEL_OVERLAY (1 << 12) + +#define NV_PRMCIO_ARX 0x006013c0 +#define NV_PRMCIO_AR__WRITE 0x006013c0 +#define NV_PRMCIO_AR__READ 0x006013c1 +# define NV_CIO_AR_MODE_INDEX 0x10 +# define NV_CIO_AR_OSCAN_INDEX 0x11 +# define NV_CIO_AR_PLANE_INDEX 0x12 +# define NV_CIO_AR_HPP_INDEX 0x13 +# define NV_CIO_AR_CSEL_INDEX 0x14 +#define NV_PRMCIO_INP0 0x006013c2 +#define NV_PRMCIO_CRX__COLOR 0x006013d4 +#define NV_PRMCIO_CR__COLOR 0x006013d5 + /* Standard VGA CRTC registers */ +# define NV_CIO_CR_HDT_INDEX 0x00 /* horizontal display total */ +# define NV_CIO_CR_HDE_INDEX 0x01 /* horizontal display end */ +# define NV_CIO_CR_HBS_INDEX 0x02 /* horizontal blanking start */ +# define NV_CIO_CR_HBE_INDEX 0x03 /* horizontal blanking end */ +# define NV_CIO_CR_HBE_4_0 4:0 +# define NV_CIO_CR_HRS_INDEX 0x04 /* horizontal retrace start */ +# define NV_CIO_CR_HRE_INDEX 0x05 /* horizontal retrace end */ +# define NV_CIO_CR_HRE_4_0 4:0 +# define NV_CIO_CR_HRE_HBE_5 7:7 +# define NV_CIO_CR_VDT_INDEX 0x06 /* vertical display total */ +# define NV_CIO_CR_OVL_INDEX 0x07 /* overflow bits */ +# define NV_CIO_CR_OVL_VDT_8 0:0 +# define NV_CIO_CR_OVL_VDE_8 1:1 +# define NV_CIO_CR_OVL_VRS_8 2:2 +# define NV_CIO_CR_OVL_VBS_8 3:3 +# define NV_CIO_CR_OVL_VDT_9 5:5 +# 
define NV_CIO_CR_OVL_VDE_9 6:6 +# define NV_CIO_CR_OVL_VRS_9 7:7 +# define NV_CIO_CR_RSAL_INDEX 0x08 /* normally "preset row scan" */ +# define NV_CIO_CR_CELL_HT_INDEX 0x09 /* cell height?! normally "max scan line" */ +# define NV_CIO_CR_CELL_HT_VBS_9 5:5 +# define NV_CIO_CR_CELL_HT_SCANDBL 7:7 +# define NV_CIO_CR_CURS_ST_INDEX 0x0a /* cursor start */ +# define NV_CIO_CR_CURS_END_INDEX 0x0b /* cursor end */ +# define NV_CIO_CR_SA_HI_INDEX 0x0c /* screen start address high */ +# define NV_CIO_CR_SA_LO_INDEX 0x0d /* screen start address low */ +# define NV_CIO_CR_TCOFF_HI_INDEX 0x0e /* cursor offset high */ +# define NV_CIO_CR_TCOFF_LO_INDEX 0x0f /* cursor offset low */ +# define NV_CIO_CR_VRS_INDEX 0x10 /* vertical retrace start */ +# define NV_CIO_CR_VRE_INDEX 0x11 /* vertical retrace end */ +# define NV_CIO_CR_VRE_3_0 3:0 +# define NV_CIO_CR_VDE_INDEX 0x12 /* vertical display end */ +# define NV_CIO_CR_OFFSET_INDEX 0x13 /* sets screen pitch */ +# define NV_CIO_CR_ULINE_INDEX 0x14 /* underline location */ +# define NV_CIO_CR_VBS_INDEX 0x15 /* vertical blank start */ +# define NV_CIO_CR_VBE_INDEX 0x16 /* vertical blank end */ +# define NV_CIO_CR_MODE_INDEX 0x17 /* crtc mode control */ +# define NV_CIO_CR_LCOMP_INDEX 0x18 /* line compare */ + /* Extended VGA CRTC registers */ +# define NV_CIO_CRE_RPC0_INDEX 0x19 /* repaint control 0 */ +# define NV_CIO_CRE_RPC0_OFFSET_10_8 7:5 +# define NV_CIO_CRE_RPC1_INDEX 0x1a /* repaint control 1 */ +# define NV_CIO_CRE_RPC1_LARGE 2:2 +# define NV_CIO_CRE_FF_INDEX 0x1b /* fifo control */ +# define NV_CIO_CRE_ENH_INDEX 0x1c /* enhanced? */ +# define NV_CIO_SR_LOCK_INDEX 0x1f /* crtc lock */ +# define NV_CIO_SR_UNLOCK_RW_VALUE 0x57 +# define NV_CIO_SR_LOCK_VALUE 0x99 +# define NV_CIO_CRE_FFLWM__INDEX 0x20 /* fifo low water mark */ +# define NV_CIO_CRE_21 0x21 /* vga shadow crtc lock */ +# define NV_CIO_CRE_LSR_INDEX 0x25 /* ? */ +# define NV_CIO_CRE_LSR_VDT_10 0:0 +# define NV_CIO_CRE_LSR_VDE_10 1:1 +# define NV_CIO_CRE_LSR_VRS_10 2:2 +# define NV_CIO_CRE_LSR_VBS_10 3:3 +# define NV_CIO_CRE_LSR_HBE_6 4:4 +# define NV_CIO_CR_ARX_INDEX 0x26 /* attribute index -- ro copy of 0x60.3c0 */ +# define NV_CIO_CRE_CHIP_ID_INDEX 0x27 /* chip revision */ +# define NV_CIO_CRE_PIXEL_INDEX 0x28 +# define NV_CIO_CRE_PIXEL_FORMAT 1:0 +# define NV_CIO_CRE_HEB__INDEX 0x2d /* horizontal extra bits? */ +# define NV_CIO_CRE_HEB_HDT_8 0:0 +# define NV_CIO_CRE_HEB_HDE_8 1:1 +# define NV_CIO_CRE_HEB_HBS_8 2:2 +# define NV_CIO_CRE_HEB_HRS_8 3:3 +# define NV_CIO_CRE_HEB_ILC_8 4:4 +# define NV_CIO_CRE_2E 0x2e /* some scratch or dummy reg to force writes to sink in */ +# define NV_CIO_CRE_HCUR_ADDR2_INDEX 0x2f /* cursor */ +# define NV_CIO_CRE_HCUR_ADDR0_INDEX 0x30 /* pixmap */ +# define NV_CIO_CRE_HCUR_ADDR0_ADR 6:0 +# define NV_CIO_CRE_HCUR_ASI 7:7 +# define NV_CIO_CRE_HCUR_ADDR1_INDEX 0x31 /* address */ +# define NV_CIO_CRE_HCUR_ADDR1_ENABLE 0:0 +# define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL 1:1 +# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2 +# define NV_CIO_CRE_LCD__INDEX 0x33 +# define NV_CIO_CRE_LCD_LCD_SELECT 0:0 +# define NV_CIO_CRE_LCD_ROUTE_MASK 0x3b +# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36 +# define NV_CIO_CRE_DDC0_WR__INDEX 0x37 +# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */ +# define NV_CIO_CRE_SCRATCH3__INDEX 0x3b +# define NV_CIO_CRE_SCRATCH4__INDEX 0x3c +# define NV_CIO_CRE_DDC_STATUS__INDEX 0x3e +# define NV_CIO_CRE_DDC_WR__INDEX 0x3f +# define NV_CIO_CRE_EBR_INDEX 0x41 /* extra bits ? 
(vertical) */ +# define NV_CIO_CRE_EBR_VDT_11 0:0 +# define NV_CIO_CRE_EBR_VDE_11 2:2 +# define NV_CIO_CRE_EBR_VRS_11 4:4 +# define NV_CIO_CRE_EBR_VBS_11 6:6 +# define NV_CIO_CRE_42 0x42 +# define NV_CIO_CRE_42_OFFSET_11 6:6 +# define NV_CIO_CRE_43 0x43 +# define NV_CIO_CRE_44 0x44 /* head control */ +# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */ +# define NV_CIO_CRE_RCR 0x46 +# define NV_CIO_CRE_RCR_ENDIAN_BIG 7:7 +# define NV_CIO_CRE_47 0x47 /* extended fifo lwm, used on nv30+ */ +# define NV_CIO_CRE_49 0x49 +# define NV_CIO_CRE_4B 0x4b /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */ +# define NV_CIO_CRE_TVOUT_LATENCY 0x52 +# define NV_CIO_CRE_53 0x53 /* `fp_htiming' according to Haiku */ +# define NV_CIO_CRE_54 0x54 /* `fp_vtiming' according to Haiku */ +# define NV_CIO_CRE_57 0x57 /* index reg for cr58 */ +# define NV_CIO_CRE_58 0x58 /* data reg for cr57 */ +# define NV_CIO_CRE_59 0x59 /* related to on/off-chip-ness of digital outputs */ +# define NV_CIO_CRE_5B 0x5B /* newer colour saturation reg */ +# define NV_CIO_CRE_85 0x85 +# define NV_CIO_CRE_86 0x86 +#define NV_PRMCIO_INP0__COLOR 0x006013da + +#define NV_PRAMDAC_CU_START_POS 0x00680300 +# define NV_PRAMDAC_CU_START_POS_X 15:0 +# define NV_PRAMDAC_CU_START_POS_Y 31:16 +#define NV_RAMDAC_NV10_CURSYNC 0x00680404 + +#define NV_PRAMDAC_NVPLL_COEFF 0x00680500 +#define NV_PRAMDAC_MPLL_COEFF 0x00680504 +#define NV_PRAMDAC_VPLL_COEFF 0x00680508 +# define NV30_RAMDAC_ENABLE_VCO2 (8 << 4) + +#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c +# define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE (4 << 0) +# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL (1 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL (2 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL (4 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 (8 << 8) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 (1 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 (2 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 (4 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2 (8 << 16) +# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP (1 << 20) +# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2 (1 << 28) +# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2 (2 << 28) + +#define NV_PRAMDAC_PLL_SETUP_CONTROL 0x00680510 +#define NV_RAMDAC_VPLL2 0x00680520 +#define NV_PRAMDAC_SEL_CLK 0x00680524 +#define NV_RAMDAC_DITHER_NV11 0x00680528 +#define NV_PRAMDAC_DACCLK 0x0068052c +# define NV_PRAMDAC_DACCLK_SEL_DACCLK (1 << 0) + +#define NV_RAMDAC_NVPLL_B 0x00680570 +#define NV_RAMDAC_MPLL_B 0x00680574 +#define NV_RAMDAC_VPLL_B 0x00680578 +#define NV_RAMDAC_VPLL2_B 0x0068057c +# define NV31_RAMDAC_ENABLE_VCO2 (8 << 28) +#define NV_PRAMDAC_580 0x00680580 +# define NV_RAMDAC_580_VPLL1_ACTIVE (1 << 8) +# define NV_RAMDAC_580_VPLL2_ACTIVE (1 << 28) + +#define NV_PRAMDAC_GENERAL_CONTROL 0x00680600 +# define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON (3 << 4) +# define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL (1 << 8) +# define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL (1 << 12) +# define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM (2 << 16) +# define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS (1 << 20) +# define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG (2 << 28) +#define NV_PRAMDAC_TEST_CONTROL 0x00680608 +# define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED (1 << 12) +# define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF (1 << 16) +# define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI (1 << 28) +#define NV_PRAMDAC_TESTPOINT_DATA 0x00680610 +# define 
NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK (8 << 28) +#define NV_PRAMDAC_630 0x00680630 +#define NV_PRAMDAC_634 0x00680634 + +#define NV_PRAMDAC_TV_SETUP 0x00680700 +#define NV_PRAMDAC_TV_VTOTAL 0x00680720 +#define NV_PRAMDAC_TV_VSKEW 0x00680724 +#define NV_PRAMDAC_TV_VSYNC_DELAY 0x00680728 +#define NV_PRAMDAC_TV_HTOTAL 0x0068072c +#define NV_PRAMDAC_TV_HSKEW 0x00680730 +#define NV_PRAMDAC_TV_HSYNC_DELAY 0x00680734 +#define NV_PRAMDAC_TV_HSYNC_DELAY2 0x00680738 + +#define NV_PRAMDAC_TV_SETUP 0x00680700 + +#define NV_PRAMDAC_FP_VDISPLAY_END 0x00680800 +#define NV_PRAMDAC_FP_VTOTAL 0x00680804 +#define NV_PRAMDAC_FP_VCRTC 0x00680808 +#define NV_PRAMDAC_FP_VSYNC_START 0x0068080c +#define NV_PRAMDAC_FP_VSYNC_END 0x00680810 +#define NV_PRAMDAC_FP_VVALID_START 0x00680814 +#define NV_PRAMDAC_FP_VVALID_END 0x00680818 +#define NV_PRAMDAC_FP_HDISPLAY_END 0x00680820 +#define NV_PRAMDAC_FP_HTOTAL 0x00680824 +#define NV_PRAMDAC_FP_HCRTC 0x00680828 +#define NV_PRAMDAC_FP_HSYNC_START 0x0068082c +#define NV_PRAMDAC_FP_HSYNC_END 0x00680830 +#define NV_PRAMDAC_FP_HVALID_START 0x00680834 +#define NV_PRAMDAC_FP_HVALID_END 0x00680838 + +#define NV_RAMDAC_FP_DITHER 0x0068083c +#define NV_PRAMDAC_FP_TG_CONTROL 0x00680848 +# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS (1 << 0) +# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE (2 << 0) +# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS (1 << 4) +# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE (2 << 4) +# define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE (0 << 8) +# define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER (1 << 8) +# define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE (2 << 8) +# define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG (1 << 20) +# define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 (1 << 24) +# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS (1 << 28) +# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28) +#define NV_PRAMDAC_FP_MARGIN_COLOR 0x0068084c +#define NV_PRAMDAC_850 0x00680850 +#define NV_PRAMDAC_85C 0x0068085c +#define NV_PRAMDAC_FP_DEBUG_0 0x00680880 +# define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE (1 << 0) +# define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE (1 << 4) +/* This doesn't seem to be essential for tmds, but still often set */ +# define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED (8 << 4) +# define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR (1 << 8) +# define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR (1 << 12) +# define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND (1 << 20) +# define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND (1 << 24) +# define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK (1 << 28) +#define NV_PRAMDAC_FP_DEBUG_1 0x00680884 +# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE 11:0 +# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE (1 << 12) +# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE 27:16 +# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE (1 << 28) +#define NV_PRAMDAC_FP_DEBUG_2 0x00680888 +#define NV_PRAMDAC_FP_DEBUG_3 0x0068088C + +/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */ +#define NV_PRAMDAC_FP_TMDS_CONTROL 0x006808b0 +# define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE (1 << 16) +#define NV_PRAMDAC_FP_TMDS_DATA 0x006808b4 + +#define NV_PRAMDAC_8C0 0x006808c0 + +/* Some kind of switch */ +#define NV_PRAMDAC_900 0x00680900 +#define NV_PRAMDAC_A20 0x00680A20 +#define NV_PRAMDAC_A24 0x00680A24 +#define NV_PRAMDAC_A34 0x00680A34 + +#define NV_PRAMDAC_CTV 0x00680c00 + +/* names fabricated from NV_USER_DAC info */ +#define NV_PRMDIO_PIXEL_MASK 0x006813c6 +# define NV_PRMDIO_PIXEL_MASK_MASK 0xff +#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7 +#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8 +#define 
NV_PRMDIO_PALETTE_DATA 0x006813c9 + +#define NV_PGRAPH_DEBUG_0 0x00400080 +#define NV_PGRAPH_DEBUG_1 0x00400084 +#define NV_PGRAPH_DEBUG_2_NV04 0x00400088 +#define NV_PGRAPH_DEBUG_2 0x00400620 +#define NV_PGRAPH_DEBUG_3 0x0040008c +#define NV_PGRAPH_DEBUG_4 0x00400090 +#define NV_PGRAPH_INTR 0x00400100 +#define NV_PGRAPH_INTR_EN 0x00400140 +#define NV_PGRAPH_CTX_CONTROL 0x00400144 +#define NV_PGRAPH_CTX_CONTROL_NV04 0x00400170 +#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C +#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540 +#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544 +#define NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548 +#define NV_PGRAPH_BETA_AND 0x00400608 +#define NV_PGRAPH_LIMIT_VIOL_PIX 0x00400610 +#define NV_PGRAPH_BOFFSET0 0x00400640 +#define NV_PGRAPH_BOFFSET1 0x00400644 +#define NV_PGRAPH_BOFFSET2 0x00400648 +#define NV_PGRAPH_BLIMIT0 0x00400684 +#define NV_PGRAPH_BLIMIT1 0x00400688 +#define NV_PGRAPH_BLIMIT2 0x0040068c +#define NV_PGRAPH_STATUS 0x00400700 +#define NV_PGRAPH_SURFACE 0x00400710 +#define NV_PGRAPH_STATE 0x00400714 +#define NV_PGRAPH_FIFO 0x00400720 +#define NV_PGRAPH_PATTERN_SHAPE 0x00400810 +#define NV_PGRAPH_TILE 0x00400b00 + +#define NV_PVIDEO_INTR_EN 0x00008140 +#define NV_PVIDEO_BUFFER 0x00008700 +#define NV_PVIDEO_STOP 0x00008704 +#define NV_PVIDEO_UVPLANE_BASE(buff) (0x00008800+(buff)*4) +#define NV_PVIDEO_UVPLANE_LIMIT(buff) (0x00008808+(buff)*4) +#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4) +#define NV_PVIDEO_BASE(buff) (0x00008900+(buff)*4) +#define NV_PVIDEO_LIMIT(buff) (0x00008908+(buff)*4) +#define NV_PVIDEO_LUMINANCE(buff) (0x00008910+(buff)*4) +#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4) +#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4) +#define NV_PVIDEO_SIZE_IN(buff) (0x00008928+(buff)*4) +#define NV_PVIDEO_POINT_IN(buff) (0x00008930+(buff)*4) +#define NV_PVIDEO_DS_DX(buff) (0x00008938+(buff)*4) +#define NV_PVIDEO_DT_DY(buff) (0x00008940+(buff)*4) +#define NV_PVIDEO_POINT_OUT(buff) (0x00008948+(buff)*4) +#define NV_PVIDEO_SIZE_OUT(buff) (0x00008950+(buff)*4) +#define NV_PVIDEO_FORMAT(buff) (0x00008958+(buff)*4) +# define NV_PVIDEO_FORMAT_PLANAR (1 << 0) +# define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8 (1 << 16) +# define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY (1 << 20) +# define NV_PVIDEO_FORMAT_MATRIX_ITURBT709 (1 << 24) +#define NV_PVIDEO_COLOR_KEY 0x00008B00 + +/* NV04 overlay defines from VIDIX & Haiku */ +#define NV_PVIDEO_INTR_EN_0 0x00680140 +#define NV_PVIDEO_STEP_SIZE 0x00680200 +#define NV_PVIDEO_CONTROL_Y 0x00680204 +#define NV_PVIDEO_CONTROL_X 0x00680208 +#define NV_PVIDEO_BUFF0_START_ADDRESS 0x0068020c +#define NV_PVIDEO_BUFF0_PITCH_LENGTH 0x00680214 +#define NV_PVIDEO_BUFF0_OFFSET 0x0068021c +#define NV_PVIDEO_BUFF1_START_ADDRESS 0x00680210 +#define NV_PVIDEO_BUFF1_PITCH_LENGTH 0x00680218 +#define NV_PVIDEO_BUFF1_OFFSET 0x00680220 +#define NV_PVIDEO_OE_STATE 0x00680224 +#define NV_PVIDEO_SU_STATE 0x00680228 +#define NV_PVIDEO_RM_STATE 0x0068022c +#define NV_PVIDEO_WINDOW_START 0x00680230 +#define NV_PVIDEO_WINDOW_SIZE 0x00680234 +#define NV_PVIDEO_FIFO_THRES_SIZE 0x00680238 +#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c +#define NV_PVIDEO_KEY 0x00680240 +#define NV_PVIDEO_OVERLAY 0x00680244 +#define NV_PVIDEO_RED_CSC_OFFSET 0x00680280 +#define NV_PVIDEO_GREEN_CSC_OFFSET 0x00680284 +#define NV_PVIDEO_BLUE_CSC_OFFSET 0x00680288 +#define NV_PVIDEO_CSC_ADJUST 0x0068028c + +#endif diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c new file mode 100644 index 
0000000..08c6f5e --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c @@ -0,0 +1,592 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include "nouveau_drm.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "hw.h" +#include "tvnv17.h" + +char *nv17_tv_norm_names[NUM_TV_NORMS] = { + [TV_NORM_PAL] = "PAL", + [TV_NORM_PAL_M] = "PAL-M", + [TV_NORM_PAL_N] = "PAL-N", + [TV_NORM_PAL_NC] = "PAL-Nc", + [TV_NORM_NTSC_M] = "NTSC-M", + [TV_NORM_NTSC_J] = "NTSC-J", + [TV_NORM_HD480I] = "hd480i", + [TV_NORM_HD480P] = "hd480p", + [TV_NORM_HD576I] = "hd576i", + [TV_NORM_HD576P] = "hd576p", + [TV_NORM_HD720P] = "hd720p", + [TV_NORM_HD1080I] = "hd1080i" +}; + +/* TV standard specific parameters */ + +struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = { + [TV_NORM_PAL] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, + 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, + 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 + } } } }, + + [TV_NORM_PAL_M] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_PAL_N] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_PAL_NC] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 
0x3, + 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, + 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 + } } } }, + + [TV_NORM_NTSC_M] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_NTSC_J] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_HD480I] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 480, 59940, { + 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0, + 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1, + 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4, + 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_HD576I] = { TV_ENC_MODE, { + .tv_enc_mode = { 720, 576, 50000, { + 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18, + 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3, + 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c, + 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3, + 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5, + 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0, + 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b, + 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0 + } } } }, + + + [TV_NORM_HD480P] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, + 720, 735, 743, 858, 0, 480, 490, 494, 525, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, + 0x354003a, 0x40000, 0x6f0344, 0x18100000, + 0x10160004, 0x10060005, 0x1006000c, 0x10060020, + 0x10060021, 0x140e0022, 0x10060202, 0x1802020a, + 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff, + 0x10000fff, 0x10000fff, 0x10000fff, 0x70, + 0x3ff0000, 0x57, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, + 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 + } } } }, + + [TV_NORM_HD576P] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, + 720, 730, 738, 864, 0, 576, 581, 585, 625, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314, + 0x354003a, 0x40000, 0x6f0344, 0x18100000, + 0x10060001, 0x10060009, 0x10060026, 0x10060027, + 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff, + 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, + 0x10000fff, 0x10000fff, 0x10000fff, 0x69, + 0x3ff0000, 0x57, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300, + 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400 + } } } }, + + [TV_NORM_HD720P] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, + 1280, 1349, 1357, 1650, 
0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622, + 0x66b0021, 0x6004a, 0x1210626, 0x8170000, + 0x70004, 0x70016, 0x70017, 0x40f0018, + 0x702e8, 0x81702ed, 0xfff, 0xfff, + 0xfff, 0xfff, 0xfff, 0xfff, + 0xfff, 0xfff, 0xfff, 0x0, + 0x2e40001, 0x58, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300, + 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0 + } } } }, + + [TV_NORM_HD1080I] = { CTV_ENC_MODE, { + .ctv_enc_mode = { + .mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, + 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC + | DRM_MODE_FLAG_INTERLACE) }, + .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868, + 0x8940028, 0x60054, 0xe80870, 0xbf70000, + 0xbc70004, 0x70005, 0x70012, 0x70013, + 0x40f0014, 0x70230, 0xbf70232, 0xbf70233, + 0x1c70237, 0x70238, 0x70244, 0x70245, + 0x40f0246, 0x70462, 0x1f70464, 0x0, + 0x2e40001, 0x58, 0x2e001e, 0x258012c, + 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300, + 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0 + } } } } +}; + +/* + * The following is some guesswork on how the TV encoder flicker + * filter/rescaler works: + * + * It seems to use some sort of resampling filter, it is controlled + * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they + * control the horizontal and vertical stage respectively, there is + * also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER, + * but they seem to do nothing. A rough guess might be that they could + * be used to independently control the filtering of each interlaced + * field, but I don't know how they are enabled. The whole filtering + * process seems to be disabled with bits 26:27 of PTV_200, but we + * aren't doing that. + * + * The layout of both register sets is the same: + * + * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40] + * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c] + * + * Each coefficient is stored in bits [31],[15:9] in two's complement + * format. They seem to be some kind of weights used in a low-pass + * filter. Both A and B coefficients are applied to the 14 nearest + * samples on each side (Listed from nearest to furthermost. They + * roughly cover 2 framebuffer pixels on each side). They are + * probably multiplied with some more hardwired weights before being + * used: B-coefficients are applied the same on both sides, + * A-coefficients are inverted before being applied to the opposite + * side. + * + * After all the hassle, I got the following formula by empirical + * means... 
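+ * Reading the code below: id1..id5 are fixed-point scale factors of
+ * 2^8, 2^16, 2^24, 2^32 and 2^48. For each of the four register groups
+ * and seven taps, a cubic polynomial in the tap index i is evaluated
+ * four times -- a constant part, a part weighted by the resampling
+ * ratio "rs" (16.16 fixed point), a part weighted by the signed flicker
+ * setting (scaled by 2^24), and a cross term -- so every product lands
+ * at 2^48 scale. The sum is rounded, shifted right by 39 and packed
+ * into the sign bit [31] and the 7-bit magnitude field [15:9] mentioned
+ * above.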
+ */ + +#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o) + +#define id1 (1LL << 8) +#define id2 (1LL << 16) +#define id3 (1LL << 24) +#define id4 (1LL << 32) +#define id5 (1LL << 48) + +static struct filter_params{ + int64_t k1; + int64_t ki; + int64_t ki2; + int64_t ki3; + int64_t kr; + int64_t kir; + int64_t ki2r; + int64_t ki3r; + int64_t kf; + int64_t kif; + int64_t ki2f; + int64_t ki3f; + int64_t krf; + int64_t kirf; + int64_t ki2rf; + int64_t ki3rf; +} fparams[2][4] = { + /* Horizontal filter parameters */ + { + {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5, + 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4, + 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3, + -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1}, + {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5, + 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4, + 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3, + -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1}, + {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5, + 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4, + 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3, + 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1}, + {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5, + -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4, + -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3, + 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1}, + }, + + /* Vertical filter parameters */ + { + {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5, + -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4, + -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3, + 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1}, + {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5, + 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4, + 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3, + -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1}, + {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5, + 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4, + 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3, + -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1}, + {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5, + 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4, + 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3, + -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1}, + } +}; + +static void tv_setup_filter(struct drm_encoder *encoder) +{ + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + struct drm_display_mode *mode = &encoder->crtc->mode; + uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter, + &tv_enc->state.vfilter}; + int i, j, k; + int32_t overscan = calc_overscan(tv_enc->overscan); + int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100); + uint64_t rs[] = {mode->hdisplay * id3, + mode->vdisplay * id3}; + + do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay); + do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay); + + for (k = 0; k < 2; k++) { + rs[k] = max((int64_t)rs[k], id2); + + for (j = 0; j < 4; 
j++) { + struct filter_params *p = &fparams[k][j]; + + for (i = 0; i < 7; i++) { + int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + + p->ki3*i*i*i) + + (p->kr + p->kir*i + p->ki2r*i*i + + p->ki3r*i*i*i) * rs[k] + + (p->kf + p->kif*i + p->ki2f*i*i + + p->ki3f*i*i*i) * flicker + + (p->krf + p->kirf*i + p->ki2rf*i*i + + p->ki3rf*i*i*i) * flicker * rs[k]; + + (*filters[k])[j][i] = (c + id5/2) >> 39 + & (0x1 << 31 | 0x7f << 9); + } + } + } +} + +/* Hardware state saving/restoring */ + +static void tv_save_filter(struct drm_device *dev, uint32_t base, + uint32_t regs[4][7]) +{ + int i, j; + uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 7; j++) + regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j); + } +} + +static void tv_load_filter(struct drm_device *dev, uint32_t base, + uint32_t regs[4][7]) +{ + int i, j; + uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; + + for (i = 0; i < 4; i++) { + for (j = 0; j < 7; j++) + nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]); + } +} + +void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state) +{ + int i; + + for (i = 0; i < 0x40; i++) + state->tv_enc[i] = nv_read_tv_enc(dev, i); + + tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter); + tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2); + tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter); + + nv_save_ptv(dev, state, 200); + nv_save_ptv(dev, state, 204); + nv_save_ptv(dev, state, 208); + nv_save_ptv(dev, state, 20c); + nv_save_ptv(dev, state, 304); + nv_save_ptv(dev, state, 500); + nv_save_ptv(dev, state, 504); + nv_save_ptv(dev, state, 508); + nv_save_ptv(dev, state, 600); + nv_save_ptv(dev, state, 604); + nv_save_ptv(dev, state, 608); + nv_save_ptv(dev, state, 60c); + nv_save_ptv(dev, state, 610); + nv_save_ptv(dev, state, 614); +} + +void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state) +{ + int i; + + for (i = 0; i < 0x40; i++) + nv_write_tv_enc(dev, i, state->tv_enc[i]); + + tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter); + tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2); + tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter); + + nv_load_ptv(dev, state, 200); + nv_load_ptv(dev, state, 204); + nv_load_ptv(dev, state, 208); + nv_load_ptv(dev, state, 20c); + nv_load_ptv(dev, state, 304); + nv_load_ptv(dev, state, 500); + nv_load_ptv(dev, state, 504); + nv_load_ptv(dev, state, 508); + nv_load_ptv(dev, state, 600); + nv_load_ptv(dev, state, 604); + nv_load_ptv(dev, state, 608); + nv_load_ptv(dev, state, 60c); + nv_load_ptv(dev, state, 610); + nv_load_ptv(dev, state, 614); + + /* This is required for some settings to kick in. 
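Toggling encoder register 0x3e high and then low below seems to act as a strobe that latches the state loaded above.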
*/ + nv_write_tv_enc(dev, 0x3e, 1); + nv_write_tv_enc(dev, 0x3e, 0); +} + +/* Timings similar to the ones the blob sets */ + +const struct drm_display_mode nv17_tv_modes[] = { + { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0, + 320, 344, 392, 560, 0, 200, 200, 202, 220, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC + | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, + { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0, + 320, 344, 392, 560, 0, 240, 240, 246, 263, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC + | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, + { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0, + 400, 432, 496, 640, 0, 300, 300, 303, 314, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC + | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) }, + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0, + 640, 672, 768, 880, 0, 480, 480, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0, + 720, 752, 872, 960, 0, 480, 480, 493, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0, + 720, 776, 856, 960, 0, 576, 576, 588, 597, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0, + 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0, + 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, + {} +}; + +void nv17_tv_update_properties(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_state *regs = &tv_enc->state; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + int subconnector = tv_enc->select_subconnector ? + tv_enc->select_subconnector : + tv_enc->subconnector; + + switch (subconnector) { + case DRM_MODE_SUBCONNECTOR_Composite: + { + regs->ptv_204 = 0x2; + + /* The composite connector may be found on either pin. 
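The code below picks the PTV_204 routing bits from whichever pin reported a load during detection (0x4 or 0x2 in pin_mask), and falls back to driving both if neither did.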
*/ + if (tv_enc->pin_mask & 0x4) + regs->ptv_204 |= 0x010000; + else if (tv_enc->pin_mask & 0x2) + regs->ptv_204 |= 0x100000; + else + regs->ptv_204 |= 0x110000; + + regs->tv_enc[0x7] = 0x10; + break; + } + case DRM_MODE_SUBCONNECTOR_SVIDEO: + regs->ptv_204 = 0x11012; + regs->tv_enc[0x7] = 0x18; + break; + + case DRM_MODE_SUBCONNECTOR_Component: + regs->ptv_204 = 0x111333; + regs->tv_enc[0x7] = 0x14; + break; + + case DRM_MODE_SUBCONNECTOR_SCART: + regs->ptv_204 = 0x111012; + regs->tv_enc[0x7] = 0x18; + break; + } + + regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], + 255, tv_enc->saturation); + regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], + 255, tv_enc->saturation); + regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; + + nv_load_ptv(dev, regs, 204); + nv_load_tv_enc(dev, regs, 7); + nv_load_tv_enc(dev, regs, 20); + nv_load_tv_enc(dev, regs, 22); + nv_load_tv_enc(dev, regs, 25); +} + +void nv17_tv_update_rescaler(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_state *regs = &tv_enc->state; + + regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8); + + tv_setup_filter(encoder); + + nv_load_ptv(dev, regs, 208); + tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter); + tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2); + tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter); +} + +void nv17_ctv_update_rescaler(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head]; + struct drm_display_mode *crtc_mode = &encoder->crtc->mode; + struct drm_display_mode *output_mode = + &get_tv_norm(encoder)->ctv_enc_mode.mode; + int overscan, hmargin, vmargin, hratio, vratio; + + /* The rescaler doesn't do the right thing for interlaced modes. 
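Forcing overscan to 100 below appears to yield the full margins and hence a 1:1 scale ratio, so the picture is simply centered rather than rescaled.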
*/ + if (output_mode->flags & DRM_MODE_FLAG_INTERLACE) + overscan = 100; + else + overscan = tv_enc->overscan; + + hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; + vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; + + hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), + hmargin, overscan); + vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), + vmargin, overscan); + + hratio = crtc_mode->hdisplay * 0x800 / + (output_mode->hdisplay - 2*hmargin); + vratio = crtc_mode->vdisplay * 0x800 / + (output_mode->vdisplay - 2*vmargin) & ~3; + + regs->fp_horiz_regs[FP_VALID_START] = hmargin; + regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; + regs->fp_vert_regs[FP_VALID_START] = vmargin; + regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1; + + regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE | + XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) | + NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE | + XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START, + regs->fp_horiz_regs[FP_VALID_START]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END, + regs->fp_horiz_regs[FP_VALID_END]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START, + regs->fp_vert_regs[FP_VALID_START]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END, + regs->fp_vert_regs[FP_VALID_END]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1); +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c new file mode 100644 index 0000000..bf13db4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <drm/drmP.h> +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "hw.h" +#include <drm/drm_crtc_helper.h> + +#include <drm/i2c/ch7006.h> + +#include <subdev/i2c.h> + +static struct i2c_board_info nv04_tv_encoder_info[] = { + { + I2C_BOARD_INFO("ch7006", 0x75), + .platform_data = &(struct ch7006_encoder_params) { + CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER, + 0, 0, 0, + CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED, + CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC + } + }, + { } +}; + +int nv04_tv_identify(struct drm_device *dev, int i2c_index) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + + return i2c->identify(i2c, i2c_index, "TV encoder", + nv04_tv_encoder_info, NULL); +} + + +#define PLLSEL_TV_CRTC1_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1) +#define PLLSEL_TV_CRTC2_MASK \ + (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \ + | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2) + +static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; + uint8_t crtc1A; + + NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n", + mode, nv_encoder->dcb->index); + + state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); + + if (mode == DRM_MODE_DPMS_ON) { + int head = nouveau_crtc(encoder->crtc)->index; + crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); + + state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK : + PLLSEL_TV_CRTC1_MASK; + + /* Inhibit hsync */ + crtc1A |= 0x80; + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); + } + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); + + get_slave_funcs(encoder)->dpms(encoder, mode); +} + +static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) +{ + struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head]; + + state->tv_setup = 0; + + if (bind) + state->CRTC[NV_CIO_CRE_49] |= 0x10; + else + state->CRTC[NV_CIO_CRE_49] &= ~0x10; + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, + state->CRTC[NV_CIO_CRE_LCD__INDEX]); + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, + state->CRTC[NV_CIO_CRE_49]); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, + state->tv_setup); +} + +static void nv04_tv_prepare(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + int head = nouveau_crtc(encoder->crtc)->index; + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_disable(dev, head); + + if (nv_two_heads(dev)) + nv04_tv_bind(dev, head ^ 1, false); + + nv04_tv_bind(dev, head, true); +} + +static void nv04_tv_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; + + regp->tv_htotal = adjusted_mode->htotal; + regp->tv_vtotal = adjusted_mode->vtotal; + + /* These delay the TV signals with respect to the VGA port, + * they might be useful if we ever allow a CRTC to drive + * multiple outputs. 
+ */ + regp->tv_hskew = 1; + regp->tv_hsync_delay = 1; + regp->tv_hsync_delay2 = 64; + regp->tv_vskew = 1; + regp->tv_vsync_delay = 1; + + get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode); +} + +static void nv04_tv_commit(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +static void nv04_tv_destroy(struct drm_encoder *encoder) +{ + get_slave_funcs(encoder)->destroy(encoder); + drm_encoder_cleanup(encoder); + + kfree(encoder->helper_private); + kfree(nouveau_encoder(encoder)); +} + +static const struct drm_encoder_funcs nv04_tv_funcs = { + .destroy = nv04_tv_destroy, +}; + +static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = { + .dpms = nv04_tv_dpms, + .save = drm_i2c_encoder_save, + .restore = drm_i2c_encoder_restore, + .mode_fixup = drm_i2c_encoder_mode_fixup, + .prepare = nv04_tv_prepare, + .commit = nv04_tv_commit, + .mode_set = nv04_tv_mode_set, + .detect = drm_i2c_encoder_detect, +}; + +int +nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) +{ + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index); + int type, ret; + + /* Ensure that we can talk to this encoder */ + type = nv04_tv_identify(dev, entry->i2c_index); + if (type < 0) + return type; + + /* Allocate the necessary memory */ + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + + /* Initialize the common members */ + encoder = to_drm_encoder(nv_encoder); + + drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs); + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + nv_encoder->dcb = entry; + nv_encoder->or = ffs(entry->or) - 1; + + /* Run the slave-specific initialization */ + ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), + &port->adapter, &nv04_tv_encoder_info[type]); + if (ret < 0) + goto fail_cleanup; + + /* Attach it to the specified connector. */ + get_slave_funcs(encoder)->create_resources(encoder, connector); + drm_mode_connector_attach_encoder(connector, encoder); + + return 0; + +fail_cleanup: + drm_encoder_cleanup(encoder); + kfree(nv_encoder); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c new file mode 100644 index 0000000..acef48f --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -0,0 +1,843 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_encoder.h" +#include "nouveau_connector.h" +#include "nouveau_crtc.h" +#include "hw.h" +#include "tvnv17.h" + +#include <core/device.h> + +#include <subdev/bios/gpio.h> +#include <subdev/gpio.h> + +MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" + "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" + "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" + "\t\tDefault: PAL\n" + "\t\t*NOTE* Ignored for cards with external TV encoders."); +static char *nouveau_tv_norm; +module_param_named(tv_norm, nouveau_tv_norm, charp, 0400); + +static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + uint32_t testval, regoffset = nv04_dac_output_offset(encoder); + uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, + fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; + uint32_t sample = 0; + int head; + +#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) + testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); + if (drm->vbios.tvdactestval) + testval = drm->vbios.tvdactestval; + + dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); + head = (dacclk & 0x100) >> 8; + + /* Save the previous state. */ + gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff); + gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff); + fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); + fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); + fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); + fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); + test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); + ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); + ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); + + /* Prepare the DAC for load detection. 
*/ + gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true); + gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true); + + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, + NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | + NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 | + NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | + NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | + NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS); + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0); + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, + (dacclk & ~0xff) | 0x22); + msleep(1); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, + (dacclk & ~0xff) | 0x21); + + NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); + NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); + + /* Sample pin 0x4 (usually S-video luma). */ + NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); + msleep(20); + sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) + & 0x4 << 28; + + /* Sample the remaining pins. */ + NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); + msleep(20); + sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) + & 0xa << 28; + + /* Restore the previous state. */ + NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); + NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); + NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk); + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); + NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); + gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1); + gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0); + + return sample; +} + +static bool +get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_object *device = drm->device; + + /* Zotac FX5200 */ + if (nv_device_match(device, 0x0322, 0x19da, 0x1035) || + nv_device_match(device, 0x0322, 0x19da, 0x2035)) { + *pin_mask = 0xc; + return false; + } + + /* MSI nForce2 IGP */ + if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) { + *pin_mask = 0xc; + return false; + } + + return true; +} + +static enum drm_connector_status +nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_mode_config *conf = &dev->mode_config; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct dcb_output *dcb = tv_enc->base.dcb; + bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); + + if (nv04_dac_in_use(encoder)) + return connector_status_disconnected; + + if (reliable) { + if (nv_device(drm->device)->chipset == 0x42 || + nv_device(drm->device)->chipset == 0x43) + tv_enc->pin_mask = + nv42_tv_sample_load(encoder) >> 28 & 0xe; + else + tv_enc->pin_mask = + nv17_dac_sample_load(encoder) >> 28 & 0xe; + } + + switch (tv_enc->pin_mask) { + case 0x2: + case 0x4: + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite; + break; + case 0xc: + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; + break; + case 0xe: + if (dcb->tvconf.has_component_output) + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; + else + 
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART; + break; + default: + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + break; + } + + drm_object_property_set_value(&connector->base, + conf->tv_subconnector_property, + tv_enc->subconnector); + + if (!reliable) { + return connector_status_unknown; + } else if (tv_enc->subconnector) { + NV_INFO(drm, "Load detected on output %c\n", + '@' + ffs(dcb->or)); + return connector_status_connected; + } else { + return connector_status_disconnected; + } +} + +static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + const struct drm_display_mode *tv_mode; + int n = 0; + + for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(encoder->dev, tv_mode); + + mode->clock = tv_norm->tv_enc_mode.vrefresh * + mode->htotal / 1000 * + mode->vtotal / 1000; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + mode->clock *= 2; + + if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && + mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + n++; + } + + return n; +} + +static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; + struct drm_display_mode *mode; + const struct { + int hdisplay; + int vdisplay; + } modes[] = { + { 640, 400 }, + { 640, 480 }, + { 720, 480 }, + { 720, 576 }, + { 800, 600 }, + { 1024, 768 }, + { 1280, 720 }, + { 1280, 1024 }, + { 1920, 1080 } + }; + int i, n = 0; + + for (i = 0; i < ARRAY_SIZE(modes); i++) { + if (modes[i].hdisplay > output_mode->hdisplay || + modes[i].vdisplay > output_mode->vdisplay) + continue; + + if (modes[i].hdisplay == output_mode->hdisplay && + modes[i].vdisplay == output_mode->vdisplay) { + mode = drm_mode_duplicate(encoder->dev, output_mode); + mode->type |= DRM_MODE_TYPE_PREFERRED; + + } else { + mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, + modes[i].vdisplay, 60, false, + (output_mode->flags & + DRM_MODE_FLAG_INTERLACE), false); + } + + /* CVT modes are sometimes unsuitable... 
*/ + if (output_mode->hdisplay <= 720 + || output_mode->hdisplay >= 1920) { + mode->htotal = output_mode->htotal; + mode->hsync_start = (mode->hdisplay + (mode->htotal + - mode->hdisplay) * 9 / 10) & ~7; + mode->hsync_end = mode->hsync_start + 8; + } + + if (output_mode->vdisplay >= 1024) { + mode->vtotal = output_mode->vtotal; + mode->vsync_start = output_mode->vsync_start; + mode->vsync_end = output_mode->vsync_end; + } + + mode->type |= DRM_MODE_TYPE_DRIVER; + drm_mode_probed_add(connector, mode); + n++; + } + + return n; +} + +static int nv17_tv_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (tv_norm->kind == CTV_ENC_MODE) + return nv17_tv_get_hd_modes(encoder, connector); + else + return nv17_tv_get_ld_modes(encoder, connector); +} + +static int nv17_tv_mode_valid(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (tv_norm->kind == CTV_ENC_MODE) { + struct drm_display_mode *output_mode = + &tv_norm->ctv_enc_mode.mode; + + if (mode->clock > 400000) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > output_mode->hdisplay || + mode->vdisplay > output_mode->vdisplay) + return MODE_BAD; + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) != + (output_mode->flags & DRM_MODE_FLAG_INTERLACE)) + return MODE_NO_INTERLACE; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + } else { + const int vsync_tolerance = 600; + + if (mode->clock > 70000) + return MODE_CLOCK_HIGH; + + if (abs(drm_mode_vrefresh(mode) * 1000 - + tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance) + return MODE_VSYNC; + + /* The encoder takes care of the actual interlacing */ + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + } + + return MODE_OK; +} + +static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (nv04_dac_in_use(encoder)) + return false; + + if (tv_norm->kind == CTV_ENC_MODE) + adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; + else + adjusted_mode->clock = 90000; + + return true; +} + +static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (nouveau_encoder(encoder)->last_dpms == mode) + return; + nouveau_encoder(encoder)->last_dpms = mode; + + NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n", + mode, nouveau_encoder(encoder)->dcb->index); + + regs->ptv_200 &= ~1; + + if (tv_norm->kind == CTV_ENC_MODE) { + nv04_dfp_update_fp_control(encoder, mode); + + } else { + nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF); + + if (mode == DRM_MODE_DPMS_ON) + regs->ptv_200 |= 1; + } + + nv_load_ptv(dev, regs, 200); + + gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON); + gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON); + + nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); +} + +static void nv17_tv_prepare(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + 
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + int head = nouveau_crtc(encoder->crtc)->index; + uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[ + NV_CIO_CRE_LCD__INDEX]; + uint32_t dacclk_off = NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder); + uint32_t dacclk; + + helper->dpms(encoder, DRM_MODE_DPMS_OFF); + + nv04_dfp_disable(dev, head); + + /* Unbind any FP encoders from this head if we need the FP + * stuff enabled. */ + if (tv_norm->kind == CTV_ENC_MODE) { + struct drm_encoder *enc; + + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { + struct dcb_output *dcb = nouveau_encoder(enc)->dcb; + + if ((dcb->type == DCB_OUTPUT_TMDS || + dcb->type == DCB_OUTPUT_LVDS) && + !enc->crtc && + nv04_dfp_get_bound_head(dev, dcb) == head) { + nv04_dfp_bind_head(dev, dcb, head ^ 1, + drm->vbios.fp.dual_link); + } + } + + } + + if (tv_norm->kind == CTV_ENC_MODE) + *cr_lcd |= 0x1 | (head ? 0x0 : 0x8); + + /* Set the DACCLK register */ + dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; + + if (nv_device(drm->device)->card_type == NV_40) + dacclk |= 0x1a << 16; + + if (tv_norm->kind == CTV_ENC_MODE) { + dacclk |= 0x20; + + if (head) + dacclk |= 0x100; + else + dacclk &= ~0x100; + + } else { + dacclk |= 0x10; + + } + + NVWriteRAMDAC(dev, 0, dacclk_off, dacclk); +} + +static void nv17_tv_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *drm_mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + int head = nouveau_crtc(encoder->crtc)->index; + struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head]; + struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state; + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + int i; + + regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */ + regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */ + regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) 
*/ + regs->tv_setup = 1; + regs->ramdac_8c0 = 0x0; + + if (tv_norm->kind == TV_ENC_MODE) { + tv_regs->ptv_200 = 0x13111100; + if (head) + tv_regs->ptv_200 |= 0x10; + + tv_regs->ptv_20c = 0x808010; + tv_regs->ptv_304 = 0x2d00000; + tv_regs->ptv_600 = 0x0; + tv_regs->ptv_60c = 0x0; + tv_regs->ptv_610 = 0x1e00000; + + if (tv_norm->tv_enc_mode.vdisplay == 576) { + tv_regs->ptv_508 = 0x1200000; + tv_regs->ptv_614 = 0x33; + + } else if (tv_norm->tv_enc_mode.vdisplay == 480) { + tv_regs->ptv_508 = 0xf00000; + tv_regs->ptv_614 = 0x13; + } + + if (nv_device(drm->device)->card_type >= NV_30) { + tv_regs->ptv_500 = 0xe8e0; + tv_regs->ptv_504 = 0x1710; + tv_regs->ptv_604 = 0x0; + tv_regs->ptv_608 = 0x0; + } else { + if (tv_norm->tv_enc_mode.vdisplay == 576) { + tv_regs->ptv_604 = 0x20; + tv_regs->ptv_608 = 0x10; + tv_regs->ptv_500 = 0x19710; + tv_regs->ptv_504 = 0x68f0; + + } else if (tv_norm->tv_enc_mode.vdisplay == 480) { + tv_regs->ptv_604 = 0x10; + tv_regs->ptv_608 = 0x20; + tv_regs->ptv_500 = 0x4b90; + tv_regs->ptv_504 = 0x1b480; + } + } + + for (i = 0; i < 0x40; i++) + tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i]; + + } else { + struct drm_display_mode *output_mode = + &tv_norm->ctv_enc_mode.mode; + + /* The registers in PRAMDAC+0xc00 control some timings and CSC + * parameters for the CTV encoder (It's only used for "HD" TV + * modes, I don't think I have enough working to guess what + * they exactly mean...), it's probably connected at the + * output of the FP encoder, but it also needs the analog + * encoder in its OR enabled and routed to the head it's + * using. It's enabled with the DACCLK register, bits [5:4]. + */ + for (i = 0; i < 38; i++) + regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i]; + + regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1; + regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; + regs->fp_horiz_regs[FP_SYNC_START] = + output_mode->hsync_start - 1; + regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; + regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay + + max((output_mode->hdisplay-600)/40 - 1, 1); + + regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1; + regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1; + regs->fp_vert_regs[FP_SYNC_START] = + output_mode->vsync_start - 1; + regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1; + regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1; + + regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | + NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | + NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; + + if (output_mode->flags & DRM_MODE_FLAG_PVSYNC) + regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS; + if (output_mode->flags & DRM_MODE_FLAG_PHSYNC) + regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS; + + regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND | + NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR | + NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR | + NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED | + NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE | + NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE; + + regs->fp_debug_2 = 0; + + regs->fp_margin_color = 0x801080; + + } +} + +static void nv17_tv_commit(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_encoder_helper_funcs *helper = encoder->helper_private; + + if (get_tv_norm(encoder)->kind == TV_ENC_MODE) { + 
nv17_tv_update_rescaler(encoder); + nv17_tv_update_properties(encoder); + } else { + nv17_ctv_update_rescaler(encoder); + } + + nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); + + /* This could use refinement for flatpanels, but it should work */ + if (nv_device(drm->device)->chipset < 0x44) + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + + nv04_dac_output_offset(encoder), + 0xf0000000); + else + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + + nv04_dac_output_offset(encoder), + 0x00100000); + + helper->dpms(encoder, DRM_MODE_DPMS_ON); + + NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", + drm_get_connector_name( + &nouveau_encoder_connector_get(nv_encoder)->base), + nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); +} + +static void nv17_tv_save(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + + nouveau_encoder(encoder)->restore.output = + NVReadRAMDAC(dev, 0, + NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder)); + + nv17_tv_state_save(dev, &tv_enc->saved_state); + + tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200; +} + +static void nv17_tv_restore(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + + nv04_dac_output_offset(encoder), + nouveau_encoder(encoder)->restore.output); + + nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); + + nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; +} + +static int nv17_tv_create_resources(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_mode_config *conf = &dev->mode_config; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; + int num_tv_norms = dcb->tvconf.has_component_output ? 
NUM_TV_NORMS : + NUM_LD_TV_NORMS; + int i; + + if (nouveau_tv_norm) { + for (i = 0; i < num_tv_norms; i++) { + if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { + tv_enc->tv_norm = i; + break; + } + } + + if (i == num_tv_norms) + NV_WARN(drm, "Invalid TV norm setting \"%s\"\n", + nouveau_tv_norm); + } + + drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); + + drm_object_attach_property(&connector->base, + conf->tv_select_subconnector_property, + tv_enc->select_subconnector); + drm_object_attach_property(&connector->base, + conf->tv_subconnector_property, + tv_enc->subconnector); + drm_object_attach_property(&connector->base, + conf->tv_mode_property, + tv_enc->tv_norm); + drm_object_attach_property(&connector->base, + conf->tv_flicker_reduction_property, + tv_enc->flicker); + drm_object_attach_property(&connector->base, + conf->tv_saturation_property, + tv_enc->saturation); + drm_object_attach_property(&connector->base, + conf->tv_hue_property, + tv_enc->hue); + drm_object_attach_property(&connector->base, + conf->tv_overscan_property, + tv_enc->overscan); + + return 0; +} + +static int nv17_tv_set_property(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct drm_mode_config *conf = &encoder->dev->mode_config; + struct drm_crtc *crtc = encoder->crtc; + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + bool modes_changed = false; + + if (property == conf->tv_overscan_property) { + tv_enc->overscan = val; + if (encoder->crtc) { + if (tv_norm->kind == CTV_ENC_MODE) + nv17_ctv_update_rescaler(encoder); + else + nv17_tv_update_rescaler(encoder); + } + + } else if (property == conf->tv_saturation_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->saturation = val; + nv17_tv_update_properties(encoder); + + } else if (property == conf->tv_hue_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->hue = val; + nv17_tv_update_properties(encoder); + + } else if (property == conf->tv_flicker_reduction_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->flicker = val; + if (encoder->crtc) + nv17_tv_update_rescaler(encoder); + + } else if (property == conf->tv_mode_property) { + if (connector->dpms != DRM_MODE_DPMS_OFF) + return -EINVAL; + + tv_enc->tv_norm = val; + + modes_changed = true; + + } else if (property == conf->tv_select_subconnector_property) { + if (tv_norm->kind != TV_ENC_MODE) + return -EINVAL; + + tv_enc->select_subconnector = val; + nv17_tv_update_properties(encoder); + + } else { + return -EINVAL; + } + + if (modes_changed) { + drm_helper_probe_single_connector_modes(connector, 0, 0); + + /* Disable the crtc to ensure a full modeset is + * performed whenever it's turned on again. 
*/ + if (crtc) { + struct drm_mode_set modeset = { + .crtc = crtc, + }; + + drm_mode_set_config_internal(&modeset); + } + } + + return 0; +} + +static void nv17_tv_destroy(struct drm_encoder *encoder) +{ + struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); + + drm_encoder_cleanup(encoder); + kfree(tv_enc); +} + +static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { + .dpms = nv17_tv_dpms, + .save = nv17_tv_save, + .restore = nv17_tv_restore, + .mode_fixup = nv17_tv_mode_fixup, + .prepare = nv17_tv_prepare, + .commit = nv17_tv_commit, + .mode_set = nv17_tv_mode_set, + .detect = nv17_tv_detect, +}; + +static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { + .get_modes = nv17_tv_get_modes, + .mode_valid = nv17_tv_mode_valid, + .create_resources = nv17_tv_create_resources, + .set_property = nv17_tv_set_property, +}; + +static struct drm_encoder_funcs nv17_tv_funcs = { + .destroy = nv17_tv_destroy, +}; + +int +nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry) +{ + struct drm_device *dev = connector->dev; + struct drm_encoder *encoder; + struct nv17_tv_encoder *tv_enc = NULL; + + tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL); + if (!tv_enc) + return -ENOMEM; + + tv_enc->overscan = 50; + tv_enc->flicker = 50; + tv_enc->saturation = 50; + tv_enc->hue = 0; + tv_enc->tv_norm = TV_NORM_PAL; + tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic; + tv_enc->pin_mask = 0; + + encoder = to_drm_encoder(&tv_enc->base); + + tv_enc->base.dcb = entry; + tv_enc->base.or = ffs(entry->or) - 1; + + drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); + to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; + + encoder->possible_crtcs = entry->heads; + encoder->possible_clones = 0; + + nv17_tv_create_resources(encoder, connector); + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h new file mode 100644 index 0000000..7b33154 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2009 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NV17_TV_H__ +#define __NV17_TV_H__ + +struct nv17_tv_state { + uint8_t tv_enc[0x40]; + + uint32_t hfilter[4][7]; + uint32_t hfilter2[4][7]; + uint32_t vfilter[4][7]; + + uint32_t ptv_200; + uint32_t ptv_204; + uint32_t ptv_208; + uint32_t ptv_20c; + uint32_t ptv_304; + uint32_t ptv_500; + uint32_t ptv_504; + uint32_t ptv_508; + uint32_t ptv_600; + uint32_t ptv_604; + uint32_t ptv_608; + uint32_t ptv_60c; + uint32_t ptv_610; + uint32_t ptv_614; +}; + +enum nv17_tv_norm{ + TV_NORM_PAL, + TV_NORM_PAL_M, + TV_NORM_PAL_N, + TV_NORM_PAL_NC, + TV_NORM_NTSC_M, + TV_NORM_NTSC_J, + NUM_LD_TV_NORMS, + TV_NORM_HD480I = NUM_LD_TV_NORMS, + TV_NORM_HD480P, + TV_NORM_HD576I, + TV_NORM_HD576P, + TV_NORM_HD720P, + TV_NORM_HD1080I, + NUM_TV_NORMS +}; + +struct nv17_tv_encoder { + struct nouveau_encoder base; + + struct nv17_tv_state state; + struct nv17_tv_state saved_state; + + int overscan; + int flicker; + int saturation; + int hue; + enum nv17_tv_norm tv_norm; + int subconnector; + int select_subconnector; + uint32_t pin_mask; +}; +#define to_tv_enc(x) container_of(nouveau_encoder(x), \ + struct nv17_tv_encoder, base) + +extern char *nv17_tv_norm_names[NUM_TV_NORMS]; + +extern struct nv17_tv_norm_params { + enum { + TV_ENC_MODE, + CTV_ENC_MODE, + } kind; + + union { + struct { + int hdisplay; + int vdisplay; + int vrefresh; /* mHz */ + + uint8_t tv_enc[0x40]; + } tv_enc_mode; + + struct { + struct drm_display_mode mode; + + uint32_t ctv_regs[38]; + } ctv_enc_mode; + }; + +} nv17_tv_norms[NUM_TV_NORMS]; +#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm]) + +extern const struct drm_display_mode nv17_tv_modes[]; + +static inline int interpolate(int y0, int y1, int y2, int x) +{ + return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50; +} + +void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state); +void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state); +void nv17_tv_update_properties(struct drm_encoder *encoder); +void nv17_tv_update_rescaler(struct drm_encoder *encoder); +void nv17_ctv_update_rescaler(struct drm_encoder *encoder); + +/* TV hardware access functions */ + +static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, + uint32_t val) +{ + struct nouveau_device *device = nouveau_dev(dev); + nv_wr32(device, reg, val); +} + +static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + return nv_rd32(device, reg); +} + +static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, + uint8_t val) +{ + nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); + nv_write_ptv(dev, NV_PTV_TV_DATA, val); +} + +static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg) +{ + nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); + return nv_read_ptv(dev, NV_PTV_TV_DATA); +} + +#define nv_load_ptv(dev, state, reg) \ + nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) +#define nv_save_ptv(dev, state, reg) \ + state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) +#define nv_load_tv_enc(dev, state, reg) \ + nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c new file mode 100644 index 0000000..1c4c6c9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -0,0 +1,493 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <core/object.h> +#include <core/client.h> +#include <core/device.h> +#include <core/class.h> +#include <core/mm.h> + +#include <subdev/fb.h> +#include <subdev/timer.h> +#include <subdev/instmem.h> +#include <engine/graph.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_chan.h" +#include "nouveau_abi16.h" + +struct nouveau_abi16 * +nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) +{ + struct nouveau_cli *cli = nouveau_cli(file_priv); + mutex_lock(&cli->mutex); + if (!cli->abi16) { + struct nouveau_abi16 *abi16; + cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); + if (cli->abi16) { + INIT_LIST_HEAD(&abi16->channels); + abi16->client = nv_object(cli); + + /* allocate device object targeting client's default + * device (ie. 
the one that belongs to the fd it + * opened) + */ + if (nouveau_object_new(abi16->client, NVDRM_CLIENT, + NVDRM_DEVICE, 0x0080, + &(struct nv_device_class) { + .device = ~0ULL, + }, + sizeof(struct nv_device_class), + &abi16->device) == 0) + return cli->abi16; + + kfree(cli->abi16); + cli->abi16 = NULL; + } + + mutex_unlock(&cli->mutex); + } + return cli->abi16; +} + +int +nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) +{ + struct nouveau_cli *cli = (void *)abi16->client; + mutex_unlock(&cli->mutex); + return ret; +} + +u16 +nouveau_abi16_swclass(struct nouveau_drm *drm) +{ + switch (nv_device(drm->device)->card_type) { + case NV_04: + return 0x006e; + case NV_10: + case NV_20: + case NV_30: + case NV_40: + return 0x016e; + case NV_50: + return 0x506e; + case NV_C0: + case NV_D0: + case NV_E0: + return 0x906e; + } + + return 0x0000; +} + +static void +nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan, + struct nouveau_abi16_ntfy *ntfy) +{ + nouveau_mm_free(&chan->heap, &ntfy->node); + list_del(&ntfy->head); + kfree(ntfy); +} + +static void +nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, + struct nouveau_abi16_chan *chan) +{ + struct nouveau_abi16_ntfy *ntfy, *temp; + + /* wait for all activity to stop before releasing notify object, which + * may be still in use */ + if (chan->chan && chan->ntfy) + nouveau_channel_idle(chan->chan); + + /* cleanup notifier state */ + list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { + nouveau_abi16_ntfy_fini(chan, ntfy); + } + + if (chan->ntfy) { + nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma); + drm_gem_object_unreference_unlocked(chan->ntfy->gem); + } + + if (chan->heap.block_size) + nouveau_mm_fini(&chan->heap); + + /* destroy channel object, all children will be killed too */ + if (chan->chan) { + abi16->handles &= ~(1 << (chan->chan->handle & 0xffff)); + nouveau_channel_del(&chan->chan); + } + + list_del(&chan->head); + kfree(chan); +} + +void +nouveau_abi16_fini(struct nouveau_abi16 *abi16) +{ + struct nouveau_cli *cli = (void *)abi16->client; + struct nouveau_abi16_chan *chan, *temp; + + /* cleanup channels */ + list_for_each_entry_safe(chan, temp, &abi16->channels, head) { + nouveau_abi16_chan_fini(abi16, chan); + } + + /* destroy the device object */ + nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE); + + kfree(cli->abi16); + cli->abi16 = NULL; +} + +int +nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_timer *ptimer = nouveau_timer(device); + struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR); + struct drm_nouveau_getparam *getparam = data; + + switch (getparam->param) { + case NOUVEAU_GETPARAM_CHIPSET_ID: + getparam->value = device->chipset; + break; + case NOUVEAU_GETPARAM_PCI_VENDOR: + getparam->value = dev->pci_vendor; + break; + case NOUVEAU_GETPARAM_PCI_DEVICE: + getparam->value = dev->pci_device; + break; + case NOUVEAU_GETPARAM_BUS_TYPE: + if (drm_pci_device_is_agp(dev)) + getparam->value = 0; + else + if (!pci_is_pcie(dev->pdev)) + getparam->value = 1; + else + getparam->value = 2; + break; + case NOUVEAU_GETPARAM_FB_SIZE: + getparam->value = drm->gem.vram_available; + break; + case NOUVEAU_GETPARAM_AGP_SIZE: + getparam->value = drm->gem.gart_available; + break; + case NOUVEAU_GETPARAM_VM_VRAM_BASE: + getparam->value = 0; /* deprecated */ + break; + case NOUVEAU_GETPARAM_PTIMER_TIME: + getparam->value = ptimer->read(ptimer); + break; + case 
NOUVEAU_GETPARAM_HAS_BO_USAGE: + getparam->value = 1; + break; + case NOUVEAU_GETPARAM_HAS_PAGEFLIP: + getparam->value = 1; + break; + case NOUVEAU_GETPARAM_GRAPH_UNITS: + getparam->value = graph->units ? graph->units(graph) : 0; + break; + default: + nv_debug(device, "unknown parameter %lld\n", getparam->param); + return -EINVAL; + } + + return 0; +} + +int +nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS) +{ + return -EINVAL; +} + +int +nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_channel_alloc *init = data; + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); + struct nouveau_abi16_chan *chan; + struct nouveau_client *client; + struct nouveau_device *device; + struct nouveau_instmem *imem; + struct nouveau_fb *pfb; + int ret; + + if (unlikely(!abi16)) + return -ENOMEM; + + if (!drm->channel) + return nouveau_abi16_put(abi16, -ENODEV); + + client = nv_client(abi16->client); + device = nv_device(abi16->device); + imem = nouveau_instmem(device); + pfb = nouveau_fb(device); + + /* hack to allow channel engine type specification on kepler */ + if (device->card_type >= NV_E0) { + if (init->fb_ctxdma_handle != ~0) + init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; + else + init->fb_ctxdma_handle = init->tt_ctxdma_handle; + + /* allow flips to be executed if this is a graphics channel */ + init->tt_ctxdma_handle = 0; + if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) + init->tt_ctxdma_handle = 1; + } + + if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) + return nouveau_abi16_put(abi16, -EINVAL); + + /* allocate "abi16 channel" data and make up a handle for it */ + init->channel = ffsll(~abi16->handles); + if (!init->channel--) + return nouveau_abi16_put(abi16, -ENOSPC); + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return nouveau_abi16_put(abi16, -ENOMEM); + + INIT_LIST_HEAD(&chan->notifiers); + list_add(&chan->head, &abi16->channels); + abi16->handles |= (1 << init->channel); + + /* create channel object and initialise dma and fence management */ + ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | + init->channel, init->fb_ctxdma_handle, + init->tt_ctxdma_handle, &chan->chan); + if (ret) + goto done; + + if (device->card_type >= NV_50) + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | + NOUVEAU_GEM_DOMAIN_GART; + else + if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; + else + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; + + if (device->card_type < NV_C0) { + init->subchan[0].handle = 0x00000000; + init->subchan[0].grclass = 0x0000; + init->subchan[1].handle = NvSw; + init->subchan[1].grclass = 0x506e; + init->nr_subchan = 2; + } + + /* Named memory object area */ + ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, + 0, 0, &chan->ntfy); + if (ret == 0) + ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT); + if (ret) + goto done; + + if (device->card_type >= NV_50) { + ret = nouveau_bo_vma_add(chan->ntfy, client->vm, + &chan->ntfy_vma); + if (ret) + goto done; + } + + ret = drm_gem_handle_create(file_priv, chan->ntfy->gem, + &init->notifier_handle); + if (ret) + goto done; + + ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1); +done: + if (ret) + nouveau_abi16_chan_fini(abi16, chan); + return nouveau_abi16_put(abi16, ret); +} + + +int +nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) +{ + struct 
drm_nouveau_channel_free *req = data; + struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); + struct nouveau_abi16_chan *chan; + int ret = -ENOENT; + + if (unlikely(!abi16)) + return -ENOMEM; + + list_for_each_entry(chan, &abi16->channels, head) { + if (chan->chan->handle == (NVDRM_CHAN | req->channel)) { + nouveau_abi16_chan_fini(abi16, chan); + return nouveau_abi16_put(abi16, 0); + } + } + + return nouveau_abi16_put(abi16, ret); +} + +int +nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_grobj_alloc *init = data; + struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_object *object; + int ret; + + if (unlikely(!abi16)) + return -ENOMEM; + + if (init->handle == ~0) + return nouveau_abi16_put(abi16, -EINVAL); + + /* compatibility with userspace that assumes 506e for all chipsets */ + if (init->class == 0x506e) { + init->class = nouveau_abi16_swclass(drm); + if (init->class == 0x906e) + return nouveau_abi16_put(abi16, 0); + } + + ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel, + init->handle, init->class, NULL, 0, &object); + return nouveau_abi16_put(abi16, ret); +} + +int +nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_notifierobj_alloc *info = data; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); + struct nouveau_abi16_chan *chan = NULL, *temp; + struct nouveau_abi16_ntfy *ntfy; + struct nouveau_object *object; + struct nv_dma_class args = {}; + int ret; + + if (unlikely(!abi16)) + return -ENOMEM; + + /* completely unnecessary for these chipsets... */ + if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) + return nouveau_abi16_put(abi16, -EINVAL); + + list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { + chan = temp; + break; + } + } + + if (!chan) + return nouveau_abi16_put(abi16, -ENOENT); + + ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL); + if (!ntfy) + return nouveau_abi16_put(abi16, -ENOMEM); + + list_add(&ntfy->head, &chan->notifiers); + ntfy->handle = info->handle; + + ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1, + &ntfy->node); + if (ret) + goto done; + + args.start = ntfy->node->offset; + args.limit = ntfy->node->offset + ntfy->node->length - 1; + if (device->card_type >= NV_50) { + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + args.start += chan->ntfy_vma.offset; + args.limit += chan->ntfy_vma.offset; + } else + if (drm->agp.stat == ENABLED) { + args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; + args.start += drm->agp.base + chan->ntfy->bo.offset; + args.limit += drm->agp.base + chan->ntfy->bo.offset; + } else { + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; + args.start += chan->ntfy->bo.offset; + args.limit += chan->ntfy->bo.offset; + } + + ret = nouveau_object_new(abi16->client, chan->chan->handle, + ntfy->handle, 0x003d, &args, + sizeof(args), &object); + if (ret) + goto done; + +done: + if (ret) + nouveau_abi16_ntfy_fini(chan, ntfy); + return nouveau_abi16_put(abi16, ret); +} + +int +nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) +{ + struct drm_nouveau_gpuobj_free *fini = data; + struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); + struct nouveau_abi16_chan *chan = NULL, *temp; + struct nouveau_abi16_ntfy *ntfy; + int ret; + + if (unlikely(!abi16)) + return -ENOMEM; + + 
list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { + chan = temp; + break; + } + } + + if (!chan) + return nouveau_abi16_put(abi16, -ENOENT); + + /* synchronize with the user channel and destroy the gpu object */ + nouveau_channel_idle(chan->chan); + + ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle); + if (ret) + return nouveau_abi16_put(abi16, ret); + + /* cleanup extra state if this object was a notifier */ + list_for_each_entry(ntfy, &chan->notifiers, head) { + if (ntfy->handle == fini->handle) { + nouveau_mm_free(&chan->heap, &ntfy->node); + list_del(&ntfy->head); + break; + } + } + + return nouveau_abi16_put(abi16, 0); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h new file mode 100644 index 0000000..9000408 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -0,0 +1,115 @@ +#ifndef __NOUVEAU_ABI16_H__ +#define __NOUVEAU_ABI16_H__ + +#define ABI16_IOCTL_ARGS \ + struct drm_device *dev, void *data, struct drm_file *file_priv + +int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS); +int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); + +struct nouveau_abi16_ntfy { + struct list_head head; + struct nouveau_mm_node *node; + u32 handle; +}; + +struct nouveau_abi16_chan { + struct list_head head; + struct nouveau_channel *chan; + struct list_head notifiers; + struct nouveau_bo *ntfy; + struct nouveau_vma ntfy_vma; + struct nouveau_mm heap; +}; + +struct nouveau_abi16 { + struct nouveau_object *client; + struct nouveau_object *device; + struct list_head channels; + u64 handles; +}; + +struct nouveau_drm; +struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *); +int nouveau_abi16_put(struct nouveau_abi16 *, int); +void nouveau_abi16_fini(struct nouveau_abi16 *); +u16 nouveau_abi16_swclass(struct nouveau_drm *); + +#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) +#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) + +struct drm_nouveau_channel_alloc { + uint32_t fb_ctxdma_handle; + uint32_t tt_ctxdma_handle; + + int channel; + uint32_t pushbuf_domains; + + /* Notifier memory */ + uint32_t notifier_handle; + + /* DRM-enforced subchannel assignments */ + struct { + uint32_t handle; + uint32_t grclass; + } subchan[8]; + uint32_t nr_subchan; +}; + +struct drm_nouveau_channel_free { + int channel; +}; + +struct drm_nouveau_grobj_alloc { + int channel; + uint32_t handle; + int class; +}; + +struct drm_nouveau_notifierobj_alloc { + uint32_t channel; + uint32_t handle; + uint32_t size; + uint32_t offset; +}; + +struct drm_nouveau_gpuobj_free { + int channel; + uint32_t handle; +}; + +#define NOUVEAU_GETPARAM_PCI_VENDOR 3 +#define NOUVEAU_GETPARAM_PCI_DEVICE 4 +#define NOUVEAU_GETPARAM_BUS_TYPE 5 +#define NOUVEAU_GETPARAM_FB_SIZE 8 +#define NOUVEAU_GETPARAM_AGP_SIZE 9 +#define NOUVEAU_GETPARAM_CHIPSET_ID 11 +#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 +#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 +#define NOUVEAU_GETPARAM_PTIMER_TIME 14 +#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 +#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 +struct drm_nouveau_getparam { + uint64_t param; + uint64_t value; +}; + +struct drm_nouveau_setparam { + uint64_t param; + uint64_t value; +}; + +#define 
DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam) +#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) +#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc) +#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free) +#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) +#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) +#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c new file mode 100644 index 0000000..d97f200 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -0,0 +1,425 @@ +#include <linux/pci.h> +#include <linux/acpi.h> +#include <linux/slab.h> +#include <acpi/acpi_drivers.h> +#include <acpi/acpi_bus.h> +#include <acpi/video.h> +#include <acpi/acpi.h> +#include <linux/mxm-wmi.h> + +#include <linux/vga_switcheroo.h> + +#include <drm/drm_edid.h> + +#include "nouveau_drm.h" +#include "nouveau_acpi.h" + +#define NOUVEAU_DSM_LED 0x02 +#define NOUVEAU_DSM_LED_STATE 0x00 +#define NOUVEAU_DSM_LED_OFF 0x10 +#define NOUVEAU_DSM_LED_STAMINA 0x11 +#define NOUVEAU_DSM_LED_SPEED 0x12 + +#define NOUVEAU_DSM_POWER 0x03 +#define NOUVEAU_DSM_POWER_STATE 0x00 +#define NOUVEAU_DSM_POWER_SPEED 0x01 +#define NOUVEAU_DSM_POWER_STAMINA 0x02 + +#define NOUVEAU_DSM_OPTIMUS_FN 0x1A +#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 + +static struct nouveau_dsm_priv { + bool dsm_detected; + bool optimus_detected; + acpi_handle dhandle; + acpi_handle rom_handle; +} nouveau_dsm_priv; + +bool nouveau_is_optimus(void) { + return nouveau_dsm_priv.optimus_detected; +} + +bool nouveau_is_v1_dsm(void) { + return nouveau_dsm_priv.dsm_detected; +} + +#define NOUVEAU_DSM_HAS_MUX 0x1 +#define NOUVEAU_DSM_HAS_OPT 0x2 + +static const char nouveau_dsm_muid[] = { + 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, +}; + +static const char nouveau_op_dsm_muid[] = { + 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, + 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, +}; + +static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + int i, err; + char args_buff[4]; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(nouveau_op_dsm_muid); + params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = 0x00000100; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = func; + params[3].type = ACPI_TYPE_BUFFER; + params[3].buffer.length = 4; + /* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */ + for (i = 0; i < 4; i++) + args_buff[i] = (arg >> i * 8) & 0xFF; + params[3].buffer.pointer = args_buff; + + err = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (err) { + printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); + return err; + } + + obj = (union 
acpi_object *)output.pointer; + + if (obj->type == ACPI_TYPE_INTEGER) + if (obj->integer.value == 0x80000002) { + return -ENODEV; + } + + if (obj->type == ACPI_TYPE_BUFFER) { + if (obj->buffer.length == 4 && result) { + *result = 0; + *result |= obj->buffer.pointer[0]; + *result |= (obj->buffer.pointer[1] << 8); + *result |= (obj->buffer.pointer[2] << 16); + *result |= (obj->buffer.pointer[3] << 24); + } + } + + kfree(output.pointer); + return 0; +} + +static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + int err; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(nouveau_dsm_muid); + params[0].buffer.pointer = (char *)nouveau_dsm_muid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = 0x00000102; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = func; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = arg; + + err = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (err) { + printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); + return err; + } + + obj = (union acpi_object *)output.pointer; + + if (obj->type == ACPI_TYPE_INTEGER) + if (obj->integer.value == 0x80000002) + return -ENODEV; + + if (obj->type == ACPI_TYPE_BUFFER) { + if (obj->buffer.length == 4 && result) { + *result = 0; + *result |= obj->buffer.pointer[0]; + *result |= (obj->buffer.pointer[1] << 8); + *result |= (obj->buffer.pointer[2] << 16); + *result |= (obj->buffer.pointer[3] << 24); + } + } + + kfree(output.pointer); + return 0; +} + +/* Returns 1 if a DSM function is usable and 0 otherwise */ +static int nouveau_test_dsm(acpi_handle test_handle, + int (*dsm_func)(acpi_handle, int, int, uint32_t *), + int sfnc) +{ + u32 result = 0; + + /* Function 0 returns a Buffer containing available functions. The args + * parameter is ignored for function 0, so just put 0 in it */ + if (dsm_func(test_handle, 0, 0, &result)) + return 0; + + /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If + * the n-th bit is enabled, function n is supported */ + return result & 1 && result & (1 << sfnc); +} + +static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) +{ + mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); + mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? 
MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); + return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); +} + +static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) +{ + int arg; + if (state == VGA_SWITCHEROO_ON) + arg = NOUVEAU_DSM_POWER_SPEED; + else + arg = NOUVEAU_DSM_POWER_STAMINA; + nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); + return 0; +} + +static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) +{ + if (!nouveau_dsm_priv.dsm_detected) + return 0; + if (id == VGA_SWITCHEROO_IGD) + return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); + else + return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); +} + +static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + if (id == VGA_SWITCHEROO_IGD) + return 0; + + /* Optimus laptops have the card already disabled in + * nouveau_switcheroo_set_state */ + if (!nouveau_dsm_priv.dsm_detected) + return 0; + + return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); +} + +static int nouveau_dsm_get_client_id(struct pci_dev *pdev) +{ + /* easy option one - intel vendor ID means Integrated */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + return VGA_SWITCHEROO_IGD; + + /* is this device on Bus 0? - this may need improving */ + if (pdev->bus->number == 0) + return VGA_SWITCHEROO_IGD; + + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler nouveau_dsm_handler = { + .switchto = nouveau_dsm_switchto, + .power_state = nouveau_dsm_power_state, + .get_client_id = nouveau_dsm_get_client_id, +}; + +static int nouveau_dsm_pci_probe(struct pci_dev *pdev) +{ + acpi_handle dhandle, nvidia_handle; + acpi_status status; + int retval = 0; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); + if (ACPI_FAILURE(status)) { + return false; + } + + if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) + retval |= NOUVEAU_DSM_HAS_MUX; + + if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, + NOUVEAU_DSM_OPTIMUS_FN)) + retval |= NOUVEAU_DSM_HAS_OPT; + + if (retval) + nouveau_dsm_priv.dhandle = dhandle; + + return retval; +} + +static bool nouveau_dsm_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + int has_dsm = 0; + int has_optimus = 0; + int vga_count = 0; + bool guid_valid; + int retval; + bool ret = false; + + /* lookup the MXM GUID */ + guid_valid = mxm_wmi_supported(); + + if (guid_valid) + printk("MXM: GUID detected in BIOS\n"); + + /* now do DSM detection */ + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + + retval = nouveau_dsm_pci_probe(pdev); + if (retval & NOUVEAU_DSM_HAS_MUX) + has_dsm |= 1; + if (retval & NOUVEAU_DSM_HAS_OPT) + has_optimus = 1; + } + + /* find the optimus DSM or the old v1 DSM */ + if (has_optimus == 1) { + acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, + &buffer); + printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", + acpi_method_name); + nouveau_dsm_priv.optimus_detected = true; + ret = true; + } else if (vga_count == 2 && has_dsm && guid_valid) { + acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, + &buffer); + printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + nouveau_dsm_priv.dsm_detected = true; + 
ret = true; + } + + + return ret; +} + +void nouveau_register_dsm_handler(void) +{ + bool r; + + r = nouveau_dsm_detect(); + if (!r) + return; + + vga_switcheroo_register_handler(&nouveau_dsm_handler); +} + +/* Must be called for Optimus models before the card can be turned off */ +void nouveau_switcheroo_optimus_dsm(void) +{ + u32 result = 0; + if (!nouveau_dsm_priv.optimus_detected) + return; + + nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, + NOUVEAU_DSM_OPTIMUS_ARGS, &result); +} + +void nouveau_unregister_dsm_handler(void) +{ + if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected) + vga_switcheroo_unregister_handler(); +} + +/* retrieve the ROM in 4k blocks */ +static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object rom_arg_elements[2], *obj; + struct acpi_object_list rom_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + rom_arg.count = 2; + rom_arg.pointer = &rom_arg_elements[0]; + + rom_arg_elements[0].type = ACPI_TYPE_INTEGER; + rom_arg_elements[0].integer.value = offset; + + rom_arg_elements[1].type = ACPI_TYPE_INTEGER; + rom_arg_elements[1].integer.value = len; + + status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + obj = (union acpi_object *)buffer.pointer; + memcpy(bios+offset, obj->buffer.pointer, len); + kfree(buffer.pointer); + return len; +} + +bool nouveau_acpi_rom_supported(struct pci_dev *pdev) +{ + acpi_status status; + acpi_handle dhandle, rom_handle; + + if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) + return false; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_ROM", &rom_handle); + if (ACPI_FAILURE(status)) + return false; + + nouveau_dsm_priv.rom_handle = rom_handle; + return true; +} + +int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) +{ + return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); +} + +void * +nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) +{ + struct acpi_device *acpidev; + acpi_handle handle; + int type, ret; + void *edid; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + type = ACPI_VIDEO_DISPLAY_LCD; + break; + default: + return NULL; + } + + handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + if (!handle) + return NULL; + + ret = acpi_bus_get_device(handle, &acpidev); + if (ret) + return NULL; + + ret = acpi_video_get_edid(acpidev, type, -1, &edid); + if (ret < 0) + return NULL; + + return kmemdup(edid, EDID_LENGTH, GFP_KERNEL); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h new file mode 100644 index 0000000..74acf0f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h @@ -0,0 +1,26 @@ +#ifndef __NOUVEAU_ACPI_H__ +#define __NOUVEAU_ACPI_H__ + +#define ROM_BIOS_PAGE 4096 + +#if defined(CONFIG_ACPI) && defined(CONFIG_X86) +bool nouveau_is_optimus(void); +bool nouveau_is_v1_dsm(void); +void nouveau_register_dsm_handler(void); +void nouveau_unregister_dsm_handler(void); +void nouveau_switcheroo_optimus_dsm(void); +int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); +bool nouveau_acpi_rom_supported(struct pci_dev *pdev); +void *nouveau_acpi_edid(struct drm_device *, struct 
drm_connector *); +#else +static inline bool nouveau_is_optimus(void) { return false; }; +static inline bool nouveau_is_v1_dsm(void) { return false; }; +static inline void nouveau_register_dsm_handler(void) {} +static inline void nouveau_unregister_dsm_handler(void) {} +static inline void nouveau_switcheroo_optimus_dsm(void) {} +static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } +static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } +static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; } +#endif + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c new file mode 100644 index 0000000..6e7a55f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_agp.c @@ -0,0 +1,164 @@ +#include <linux/module.h> + +#include <core/device.h> + +#include "nouveau_drm.h" +#include "nouveau_agp.h" +#include "nouveau_reg.h" + +#if __OS_HAS_AGP +MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)"); +static int nouveau_agpmode = -1; +module_param_named(agpmode, nouveau_agpmode, int, 0400); + +static unsigned long +get_agp_mode(struct nouveau_drm *drm, unsigned long mode) +{ + struct nouveau_device *device = nv_device(drm->device); + + /* + * FW seems to be broken on nv18, it makes the card lock up + * randomly. + */ + if (device->chipset == 0x18) + mode &= ~PCI_AGP_COMMAND_FW; + + /* + * AGP mode set in the command line. + */ + if (nouveau_agpmode > 0) { + bool agpv3 = mode & 0x8; + int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode; + + mode = (mode & ~0x7) | (rate & 0x7); + } + + return mode; +} + +static bool +nouveau_agp_enabled(struct nouveau_drm *drm) +{ + struct drm_device *dev = drm->dev; + + if (!drm_pci_device_is_agp(dev) || !dev->agp) + return false; + + if (drm->agp.stat == UNKNOWN) { + if (!nouveau_agpmode) + return false; +#ifdef __powerpc__ + /* Disable AGP by default on all PowerPC machines for + * now -- At least some UniNorth-2 AGP bridges are + * known to be broken: DMA from the host to the card + * works just fine, but writeback from the card to the + * host goes straight to memory untranslated bypassing + * the GATT somehow, making them quite painful to deal + * with... + */ + if (nouveau_agpmode == -1) + return false; +#endif + return true; + } + + return (drm->agp.stat == ENABLED); +} +#endif + +void +nouveau_agp_reset(struct nouveau_drm *drm) +{ +#if __OS_HAS_AGP + struct nouveau_device *device = nv_device(drm->device); + struct drm_device *dev = drm->dev; + u32 save[2]; + int ret; + + if (!nouveau_agp_enabled(drm)) + return; + + /* First of all, disable fast writes, otherwise if it's + * already enabled in the AGP bridge and we disable the card's + * AGP controller we might be locking ourselves out of it. 
*/ + if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) | + dev->agp->mode) & PCI_AGP_COMMAND_FW) { + struct drm_agp_info info; + struct drm_agp_mode mode; + + ret = drm_agp_info(dev, &info); + if (ret) + return; + + mode.mode = get_agp_mode(drm, info.mode); + mode.mode &= ~PCI_AGP_COMMAND_FW; + + ret = drm_agp_enable(dev, mode); + if (ret) + return; + } + + + /* clear busmaster bit, and disable AGP */ + save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000); + nv_wr32(device, NV04_PBUS_PCI_NV_19, 0); + + /* reset PGRAPH, PFIFO and PTIMER */ + save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000); + nv_mask(device, 0x000200, 0x00011100, save[1]); + + /* and restore bustmaster bit (gives effect of resetting AGP) */ + nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]); +#endif +} + +void +nouveau_agp_init(struct nouveau_drm *drm) +{ +#if __OS_HAS_AGP + struct nouveau_device *device = nv_device(drm->device); + struct drm_device *dev = drm->dev; + struct drm_agp_info info; + struct drm_agp_mode mode; + int ret; + + if (!nouveau_agp_enabled(drm)) + return; + drm->agp.stat = DISABLE; + + ret = drm_agp_acquire(dev); + if (ret) { + nv_error(device, "unable to acquire AGP: %d\n", ret); + return; + } + + ret = drm_agp_info(dev, &info); + if (ret) { + nv_error(device, "unable to get AGP info: %d\n", ret); + return; + } + + /* see agp.h for the AGPSTAT_* modes available */ + mode.mode = get_agp_mode(drm, info.mode); + + ret = drm_agp_enable(dev, mode); + if (ret) { + nv_error(device, "unable to enable AGP: %d\n", ret); + return; + } + + drm->agp.stat = ENABLED; + drm->agp.base = info.aperture_base; + drm->agp.size = info.aperture_size; +#endif +} + +void +nouveau_agp_fini(struct nouveau_drm *drm) +{ +#if __OS_HAS_AGP + struct drm_device *dev = drm->dev; + if (dev->agp && dev->agp->acquired) + drm_agp_release(dev); +#endif +} diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h new file mode 100644 index 0000000..b55c086 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_agp.h @@ -0,0 +1,10 @@ +#ifndef __NOUVEAU_AGP_H__ +#define __NOUVEAU_AGP_H__ + +struct nouveau_drm; + +void nouveau_agp_reset(struct nouveau_drm *); +void nouveau_agp_init(struct nouveau_drm *); +void nouveau_agp_fini(struct nouveau_drm *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c new file mode 100644 index 0000000..2ffad21 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2009 Red Hat <mjg@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Matthew Garrett <mjg@redhat.com> + * + * Register locations derived from NVClock by Roderick Colenbrander + */ + +#include <linux/backlight.h> +#include <linux/acpi.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_encoder.h" + +static int +nv40_get_intensity(struct backlight_device *bd) +{ + struct nouveau_drm *drm = bl_get_data(bd); + struct nouveau_device *device = nv_device(drm->device); + int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) & + NV40_PMC_BACKLIGHT_MASK) >> 16; + + return val; +} + +static int +nv40_set_intensity(struct backlight_device *bd) +{ + struct nouveau_drm *drm = bl_get_data(bd); + struct nouveau_device *device = nv_device(drm->device); + int val = bd->props.brightness; + int reg = nv_rd32(device, NV40_PMC_BACKLIGHT); + + nv_wr32(device, NV40_PMC_BACKLIGHT, + (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); + + return 0; +} + +static const struct backlight_ops nv40_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nv40_get_intensity, + .update_status = nv40_set_intensity, +}; + +static int +nv40_backlight_init(struct drm_connector *connector) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_device *device = nv_device(drm->device); + struct backlight_properties props; + struct backlight_device *bd; + + if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) + return 0; + + memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_RAW; + props.max_brightness = 31; + bd = backlight_device_register("nv_backlight", &connector->kdev, drm, + &nv40_bl_ops, &props); + if (IS_ERR(bd)) + return PTR_ERR(bd); + drm->backlight = bd; + bd->props.brightness = nv40_get_intensity(bd); + backlight_update_status(bd); + + return 0; +} + +static int +nv50_get_intensity(struct backlight_device *bd) +{ + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nouveau_device *device = nv_device(drm->device); + int or = nv_encoder->or; + u32 div = 1025; + u32 val; + + val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); + val &= NV50_PDISP_SOR_PWM_CTL_VAL; + return ((val * 100) + (div / 2)) / div; +} + +static int +nv50_set_intensity(struct backlight_device *bd) +{ + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nouveau_device *device = nv_device(drm->device); + int or = nv_encoder->or; + u32 div = 1025; + u32 val = (bd->props.brightness * div) / 100; + + nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), + NV50_PDISP_SOR_PWM_CTL_NEW | val); + return 0; +} + +static const struct backlight_ops nv50_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nv50_get_intensity, + .update_status = nv50_set_intensity, +}; + +static int +nva3_get_intensity(struct backlight_device *bd) +{ + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nouveau_device *device = nv_device(drm->device); + int or = nv_encoder->or; + u32 div, val; + + div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); + val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); + val &= 
NVA3_PDISP_SOR_PWM_CTL_VAL; + if (div && div >= val) + return ((val * 100) + (div / 2)) / div; + + return 100; +} + +static int +nva3_set_intensity(struct backlight_device *bd) +{ + struct nouveau_encoder *nv_encoder = bl_get_data(bd); + struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); + struct nouveau_device *device = nv_device(drm->device); + int or = nv_encoder->or; + u32 div, val; + + div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); + val = (bd->props.brightness * div) / 100; + if (div) { + nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | + NV50_PDISP_SOR_PWM_CTL_NEW | + NVA3_PDISP_SOR_PWM_CTL_UNK); + return 0; + } + + return -EINVAL; +} + +static const struct backlight_ops nva3_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nva3_get_intensity, + .update_status = nva3_set_intensity, +}; + +static int +nv50_backlight_init(struct drm_connector *connector) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_encoder *nv_encoder; + struct backlight_properties props; + struct backlight_device *bd; + const struct backlight_ops *ops; + + nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS); + if (!nv_encoder) { + nv_encoder = find_encoder(connector, DCB_OUTPUT_DP); + if (!nv_encoder) + return -ENODEV; + } + + if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) + return 0; + + if (device->chipset <= 0xa0 || + device->chipset == 0xaa || + device->chipset == 0xac) + ops = &nv50_bl_ops; + else + ops = &nva3_bl_ops; + + memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_RAW; + props.max_brightness = 100; + bd = backlight_device_register("nv_backlight", &connector->kdev, + nv_encoder, ops, &props); + if (IS_ERR(bd)) + return PTR_ERR(bd); + + drm->backlight = bd; + bd->props.brightness = bd->ops->get_brightness(bd); + backlight_update_status(bd); + return 0; +} + +int +nouveau_backlight_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct drm_connector *connector; + +#ifdef CONFIG_ACPI + if (acpi_video_backlight_support()) { + NV_INFO(drm, "ACPI backlight interface available, " + "not registering our own\n"); + return 0; + } +#endif + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && + connector->connector_type != DRM_MODE_CONNECTOR_eDP) + continue; + + switch (device->card_type) { + case NV_40: + return nv40_backlight_init(connector); + case NV_50: + case NV_C0: + case NV_D0: + case NV_E0: + return nv50_backlight_init(connector); + default: + break; + } + } + + + return 0; +} + +void +nouveau_backlight_exit(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (drm->backlight) { + backlight_device_unregister(drm->backlight); + drm->backlight = NULL; + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c new file mode 100644 index 0000000..6aa2137 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -0,0 +1,2121 @@ +/* + * Copyright 2005-2006 Erik Waling + * Copyright 2006 Stephane Marchesin + * Copyright 2007-2009 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, 
copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF + * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <subdev/bios.h> + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "dispnv04/hw.h" +#include "nouveau_encoder.h" + +#include <linux/io-mapping.h> +#include <linux/firmware.h> + +/* these defines are made up */ +#define NV_CIO_CRE_44_HEADA 0x0 +#define NV_CIO_CRE_44_HEADB 0x3 +#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */ + +#define EDID1_LEN 128 + +#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg) +#define LOG_OLD_VALUE(x) + +struct init_exec { + bool execute; + bool repeat; +}; + +static bool nv_cksum(const uint8_t *data, unsigned int length) +{ + /* + * There's a few checksums in the BIOS, so here's a generic checking + * function. + */ + int i; + uint8_t sum = 0; + + for (i = 0; i < length; i++) + sum += data[i]; + + if (sum) + return true; + + return false; +} + +static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk) +{ + int compare_record_len, i = 0; + uint16_t compareclk, scriptptr = 0; + + if (bios->major_version < 5) /* pre BIT */ + compare_record_len = 3; + else + compare_record_len = 4; + + do { + compareclk = ROM16(bios->data[clktable + compare_record_len * i]); + if (pxclk >= compareclk * 10) { + if (bios->major_version < 5) { + uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i]; + scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]); + } else + scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]); + break; + } + i++; + } while (compareclk); + + return scriptptr; +} + +static void +run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, + struct dcb_output *dcbent, int head, bool dl) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + NV_INFO(drm, "0x%04X: Parsing digital output script table\n", + scriptptr); + NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB : + NV_CIO_CRE_44_HEADA); + nouveau_bios_run_init_table(dev, scriptptr, dcbent, head); + + nv04_dfp_bind_head(dev, dcbent, head, dl); +} + +static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 
1 : 0); + uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); + + if (!bios->fp.xlated_entry || !sub || !scriptofs) + return -EINVAL; + + run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link); + + if (script == LVDS_PANEL_OFF) { + /* off-on delay in ms */ + mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7])); + } +#ifdef __powerpc__ + /* Powerbook specific quirks */ + if (script == LVDS_RESET && + (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || + dev->pci_device == 0x0329)) + nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); +#endif + + return 0; +} + +static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk) +{ + /* + * The BIT LVDS table's header has the information to setup the + * necessary registers. Following the standard 4 byte header are: + * A bitmask byte and a dual-link transition pxclk value for use in + * selecting the init script when not using straps; 4 script pointers + * for panel power, selected by output and on/off; and 8 table pointers + * for panel init, the needed one determined by output, and bits in the + * conf byte. These tables are similar to the TMDS tables, consisting + * of a list of pxclks and script pointers. + */ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + unsigned int outputset = (dcbent->or == 4) ? 1 : 0; + uint16_t scriptptr = 0, clktable; + + /* + * For now we assume version 3.0 table - g80 support will need some + * changes + */ + + switch (script) { + case LVDS_INIT: + return -ENOSYS; + case LVDS_BACKLIGHT_ON: + case LVDS_PANEL_ON: + scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]); + break; + case LVDS_BACKLIGHT_OFF: + case LVDS_PANEL_OFF: + scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); + break; + case LVDS_RESET: + clktable = bios->fp.lvdsmanufacturerpointer + 15; + if (dcbent->or == 4) + clktable += 8; + + if (dcbent->lvdsconf.use_straps_for_mode) { + if (bios->fp.dual_link) + clktable += 4; + if (bios->fp.if_is_24bit) + clktable += 2; + } else { + /* using EDID */ + int cmpval_24bit = (dcbent->or == 4) ? 4 : 1; + + if (bios->fp.dual_link) { + clktable += 4; + cmpval_24bit <<= 1; + } + + if (bios->fp.strapless_is_24bit & cmpval_24bit) + clktable += 2; + } + + clktable = ROM16(bios->data[clktable]); + if (!clktable) { + NV_ERROR(drm, "Pixel clock comparison table not found\n"); + return -ENOENT; + } + scriptptr = clkcmptable(bios, clktable, pxclk); + } + + if (!scriptptr) { + NV_ERROR(drm, "LVDS output init script not found\n"); + return -ENOENT; + } + run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link); + + return 0; +} + +int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk) +{ + /* + * LVDS operations are multiplexed in an effort to present a single API + * which works with two vastly differing underlying structures. 
+ * This acts as the demux + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nvbios *bios = &drm->vbios; + uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; + uint32_t sel_clk_binding, sel_clk; + int ret; + + if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver || + (lvds_ver >= 0x30 && script == LVDS_INIT)) + return 0; + + if (!bios->fp.lvds_init_run) { + bios->fp.lvds_init_run = true; + call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk); + } + + if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change) + call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk); + if (script == LVDS_RESET && bios->fp.power_off_for_reset) + call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk); + + NV_INFO(drm, "Calling LVDS script %d:\n", script); + + /* don't let script change pll->head binding */ + sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; + + if (lvds_ver < 0x30) + ret = call_lvds_manufacturer_script(dev, dcbent, head, script); + else + ret = run_lvds_table(dev, dcbent, head, script, pxclk); + + bios->fp.last_script_invoc = (script << 1 | head); + + sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); + /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ + nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); + + return ret; +} + +struct lvdstableheader { + uint8_t lvds_ver, headerlen, recordlen; +}; + +static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth) +{ + /* + * BMP version (0xa) LVDS table has a simple header of version and + * record length. The BIT LVDS table has the typical BIT table header: + * version byte, header length byte, record length byte, and a byte for + * the maximum number of records that can be held in the table. 
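+ *
+ * For example (illustrative values only, not taken from any real BIOS),
+ * a BIT-style header could read 0x30 0x1f 0x02 0x10: version 3.0, a
+ * 0x1f-byte header, 2-byte records, and at most 16 records.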
+ */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t lvds_ver, headerlen, recordlen; + + memset(lth, 0, sizeof(struct lvdstableheader)); + + if (bios->fp.lvdsmanufacturerpointer == 0x0) { + NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n"); + return -EINVAL; + } + + lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; + + switch (lvds_ver) { + case 0x0a: /* pre NV40 */ + headerlen = 2; + recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; + break; + case 0x30: /* NV4x */ + headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; + if (headerlen < 0x1f) { + NV_ERROR(drm, "LVDS table header not understood\n"); + return -EINVAL; + } + recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; + break; + case 0x40: /* G80/G90 */ + headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; + if (headerlen < 0x7) { + NV_ERROR(drm, "LVDS table header not understood\n"); + return -EINVAL; + } + recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; + break; + default: + NV_ERROR(drm, + "LVDS table revision %d.%d not currently supported\n", + lvds_ver >> 4, lvds_ver & 0xf); + return -ENOSYS; + } + + lth->lvds_ver = lvds_ver; + lth->headerlen = headerlen; + lth->recordlen = recordlen; + + return 0; +} + +static int +get_fp_strap(struct drm_device *dev, struct nvbios *bios) +{ + struct nouveau_device *device = nouveau_dev(dev); + + /* + * The fp strap is normally dictated by the "User Strap" in + * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the + * Internal_Flags struct at 0x48 is set, the user strap gets overriden + * by the PCI subsystem ID during POST, but not before the previous user + * strap has been committed to CR58 for CR57=0xf on head A, which may be + * read and used instead + */ + + if (bios->major_version < 5 && bios->data[0x48] & 0x4) + return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; + + if (device->card_type >= NV_50) + return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; + else + return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; +} + +static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t *fptable; + uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex; + int ret, ofs, fpstrapping; + struct lvdstableheader lth; + + if (bios->fp.fptablepointer == 0x0) { + /* Apple cards don't have the fp table; the laptops use DDC */ + /* The table is also missing on some x86 IGPs */ +#ifndef __powerpc__ + NV_ERROR(drm, "Pointer to flat panel table invalid\n"); +#endif + bios->digital_min_front_porch = 0x4b; + return 0; + } + + fptable = &bios->data[bios->fp.fptablepointer]; + fptable_ver = fptable[0]; + + switch (fptable_ver) { + /* + * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no + * version field, and miss one of the spread spectrum/PWM bytes. + * This could affect early GF2Go parts (not seen any appropriate ROMs + * though). Here we assume that a version of 0x05 matches this case + * (combining with a BMP version check would be better), as the + * common case for the panel type field is 0x0005, and that is in + * fact what we are reading the first byte of. 
+ */ + case 0x05: /* some NV10, 11, 15, 16 */ + recordlen = 42; + ofs = -1; + break; + case 0x10: /* some NV15/16, and NV11+ */ + recordlen = 44; + ofs = 0; + break; + case 0x20: /* NV40+ */ + headerlen = fptable[1]; + recordlen = fptable[2]; + fpentries = fptable[3]; + /* + * fptable[4] is the minimum + * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap + */ + bios->digital_min_front_porch = fptable[4]; + ofs = -7; + break; + default: + NV_ERROR(drm, + "FP table revision %d.%d not currently supported\n", + fptable_ver >> 4, fptable_ver & 0xf); + return -ENOSYS; + } + + if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */ + return 0; + + ret = parse_lvds_manufacturer_table_header(dev, bios, <h); + if (ret) + return ret; + + if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) { + bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer + + lth.headerlen + 1; + bios->fp.xlatwidth = lth.recordlen; + } + if (bios->fp.fpxlatetableptr == 0x0) { + NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n"); + return -EINVAL; + } + + fpstrapping = get_fp_strap(dev, bios); + + fpindex = bios->data[bios->fp.fpxlatetableptr + + fpstrapping * bios->fp.xlatwidth]; + + if (fpindex > fpentries) { + NV_ERROR(drm, "Bad flat panel table index\n"); + return -ENOENT; + } + + /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ + if (lth.lvds_ver > 0x10) + bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; + + /* + * If either the strap or xlated fpindex value are 0xf there is no + * panel using a strap-derived bios mode present. this condition + * includes, but is different from, the DDC panel indicator above + */ + if (fpstrapping == 0xf || fpindex == 0xf) + return 0; + + bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen + + recordlen * fpindex + ofs; + + NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n", + ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1, + ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1, + ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10); + + return 0; +} + +bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; + + if (!mode) /* just checking whether we can produce a mode */ + return bios->fp.mode_ptr; + + memset(mode, 0, sizeof(struct drm_display_mode)); + /* + * For version 1.0 (version in byte 0): + * bytes 1-2 are "panel type", including bits on whether Colour/mono, + * single/dual link, and type (TFT etc.) + * bytes 3-6 are bits per colour in RGBX + */ + mode->clock = ROM16(mode_entry[7]) * 10; + /* bytes 9-10 is HActive */ + mode->hdisplay = ROM16(mode_entry[11]) + 1; + /* + * bytes 13-14 is HValid Start + * bytes 15-16 is HValid End + */ + mode->hsync_start = ROM16(mode_entry[17]) + 1; + mode->hsync_end = ROM16(mode_entry[19]) + 1; + mode->htotal = ROM16(mode_entry[21]) + 1; + /* bytes 23-24, 27-30 similarly, but vertical */ + mode->vdisplay = ROM16(mode_entry[25]) + 1; + mode->vsync_start = ROM16(mode_entry[31]) + 1; + mode->vsync_end = ROM16(mode_entry[33]) + 1; + mode->vtotal = ROM16(mode_entry[35]) + 1; + mode->flags |= (mode_entry[37] & 0x10) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (mode_entry[37] & 0x1) ? 
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + /* + * bytes 38-39 relate to spread spectrum settings + * bytes 40-43 are something to do with PWM + */ + + mode->status = MODE_OK; + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + return bios->fp.mode_ptr; +} + +int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit) +{ + /* + * The LVDS table header is (mostly) described in + * parse_lvds_manufacturer_table_header(): the BIT header additionally + * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if + * straps are not being used for the panel, this specifies the frequency + * at which modes should be set up in the dual link style. + * + * Following the header, the BMP (ver 0xa) table has several records, + * indexed by a separate xlat table, indexed in turn by the fp strap in + * EXTDEV_BOOT. Each record had a config byte, followed by 6 script + * numbers for use by INIT_SUB which controlled panel init and power, + * and finally a dword of ms to sleep between power off and on + * operations. + * + * In the BIT versions, the table following the header serves as an + * integrated config and xlat table: the records in the table are + * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has + * two bytes - the first as a config byte, the second for indexing the + * fp mode table pointed to by the BIT 'D' table + * + * DDC is not used until after card init, so selecting the correct table + * entry and setting the dual link flag for EDID equipped panels, + * requiring tests against the native-mode pixel clock, cannot be done + * until later, when this function should be called with non-zero pxclk + */ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; + struct lvdstableheader lth; + uint16_t lvdsofs; + int ret, chip_version = bios->chip_version; + + ret = parse_lvds_manufacturer_table_header(dev, bios, <h); + if (ret) + return ret; + + switch (lth.lvds_ver) { + case 0x0a: /* pre NV40 */ + lvdsmanufacturerindex = bios->data[ + bios->fp.fpxlatemanufacturertableptr + + fpstrapping]; + + /* we're done if this isn't the EDID panel case */ + if (!pxclk) + break; + + if (chip_version < 0x25) { + /* nv17 behaviour + * + * It seems the old style lvds script pointer is reused + * to select 18/24 bit colour depth for EDID panels. + */ + lvdsmanufacturerindex = + (bios->legacy.lvds_single_a_script_ptr & 1) ? 
+ 2 : 0; + if (pxclk >= bios->fp.duallink_transition_clk) + lvdsmanufacturerindex++; + } else if (chip_version < 0x30) { + /* nv28 behaviour (off-chip encoder) + * + * nv28 does a complex dance of first using byte 121 of + * the EDID to choose the lvdsmanufacturerindex, then + * later attempting to match the EDID manufacturer and + * product IDs in a table (signature 'pidt' (panel id + * table?)), setting an lvdsmanufacturerindex of 0 and + * an fp strap of the match index (or 0xf if none) + */ + lvdsmanufacturerindex = 0; + } else { + /* nv31, nv34 behaviour */ + lvdsmanufacturerindex = 0; + if (pxclk >= bios->fp.duallink_transition_clk) + lvdsmanufacturerindex = 2; + if (pxclk >= 140000) + lvdsmanufacturerindex = 3; + } + + /* + * nvidia set the high nibble of (cr57=f, cr58) to + * lvdsmanufacturerindex in this case; we don't + */ + break; + case 0x30: /* NV4x */ + case 0x40: /* G80/G90 */ + lvdsmanufacturerindex = fpstrapping; + break; + default: + NV_ERROR(drm, "LVDS table revision not currently supported\n"); + return -ENOSYS; + } + + lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex; + switch (lth.lvds_ver) { + case 0x0a: + bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1; + bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2; + bios->fp.dual_link = bios->data[lvdsofs] & 4; + bios->fp.link_c_increment = bios->data[lvdsofs] & 8; + *if_is_24bit = bios->data[lvdsofs] & 16; + break; + case 0x30: + case 0x40: + /* + * No sign of the "power off for reset" or "reset for panel + * on" bits, but it's safer to assume we should + */ + bios->fp.power_off_for_reset = true; + bios->fp.reset_after_pclk_change = true; + + /* + * It's ok lvdsofs is wrong for nv4x edid case; dual_link is + * over-written, and if_is_24bit isn't used + */ + bios->fp.dual_link = bios->data[lvdsofs] & 1; + bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; + bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; + bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; + break; + } + + /* set dual_link flag for EDID case */ + if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) + bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); + + *dl = bios->fp.dual_link; + + return 0; +} + +int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk) +{ + /* + * the pxclk parameter is in kHz + * + * This runs the TMDS regs setting code found on BIT bios cards + * + * For ffs(or) == 1 use the first table, for ffs(or) == 2 and + * ffs(or) == 3, use the second. 
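+ *
+ * (i.e. or = 1 selects output0_script_ptr; or = 2 or or = 4, giving
+ * ffs(or) == 2 or 3, select output1_script_ptr - see the switch below)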
+ */ + + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct nvbios *bios = &drm->vbios; + int cv = bios->chip_version; + uint16_t clktable = 0, scriptptr; + uint32_t sel_clk_binding, sel_clk; + + /* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */ + if (cv >= 0x17 && cv != 0x1a && cv != 0x20 && + dcbent->location != DCB_LOC_ON_CHIP) + return 0; + + switch (ffs(dcbent->or)) { + case 1: + clktable = bios->tmds.output0_script_ptr; + break; + case 2: + case 3: + clktable = bios->tmds.output1_script_ptr; + break; + } + + if (!clktable) { + NV_ERROR(drm, "Pixel clock comparison table not found\n"); + return -EINVAL; + } + + scriptptr = clkcmptable(bios, clktable, pxclk); + + if (!scriptptr) { + NV_ERROR(drm, "TMDS output init script not found\n"); + return -ENOENT; + } + + /* don't let script change pll->head binding */ + sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; + run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); + sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; + NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); + + return 0; +} + +static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset) +{ + /* + * Parses the init table segment for pointers used in script execution. + * + * offset + 0 (16 bits): init script tables pointer + * offset + 2 (16 bits): macro index table pointer + * offset + 4 (16 bits): macro table pointer + * offset + 6 (16 bits): condition table pointer + * offset + 8 (16 bits): io condition table pointer + * offset + 10 (16 bits): io flag condition table pointer + * offset + 12 (16 bits): init function table pointer + */ + + bios->init_script_tbls_ptr = ROM16(bios->data[offset]); +} + +static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the load detect values for g80 cards. + * + * offset + 0 (16 bits): loadval table pointer + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint16_t load_table_ptr; + uint8_t version, headerlen, entrylen, num_entries; + + if (bitentry->length != 3) { + NV_ERROR(drm, "Do not understand BIT A table\n"); + return -EINVAL; + } + + load_table_ptr = ROM16(bios->data[bitentry->offset]); + + if (load_table_ptr == 0x0) { + NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n"); + return -EINVAL; + } + + version = bios->data[load_table_ptr]; + + if (version != 0x10) { + NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n", + version >> 4, version & 0xF); + return -ENOSYS; + } + + headerlen = bios->data[load_table_ptr + 1]; + entrylen = bios->data[load_table_ptr + 2]; + num_entries = bios->data[load_table_ptr + 3]; + + if (headerlen != 4 || entrylen != 4 || num_entries != 2) { + NV_ERROR(drm, "Do not understand BIT loadval table\n"); + return -EINVAL; + } + + /* First entry is normal dac, 2nd tv-out perhaps? */ + bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; + + return 0; +} + +static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the flat panel table segment that the bit entry points to. + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): ??? table pointer - seems to have 18 byte + * records beginning with a freq. 
+ * offset + 2 (16 bits): mode table pointer + */ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (bitentry->length != 4) { + NV_ERROR(drm, "Do not understand BIT display table\n"); + return -EINVAL; + } + + bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]); + + return 0; +} + +static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the init table segment that the bit entry points to. + * + * See parse_script_table_pointers for layout + */ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (bitentry->length < 14) { + NV_ERROR(drm, "Do not understand init table\n"); + return -EINVAL; + } + + parse_script_table_pointers(bios, bitentry->offset); + return 0; +} + +static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * BIT 'i' (info?) table + * + * offset + 0 (32 bits): BIOS version dword (as in B table) + * offset + 5 (8 bits): BIOS feature byte (same as for BMP?) + * offset + 13 (16 bits): pointer to table containing DAC load + * detection comparison values + * + * There's other things in the table, purpose unknown + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint16_t daccmpoffset; + uint8_t dacver, dacheaderlen; + + if (bitentry->length < 6) { + NV_ERROR(drm, "BIT i table too short for needed information\n"); + return -EINVAL; + } + + /* + * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's + * Quadro identity crisis), other bits possibly as for BMP feature byte + */ + bios->feature_byte = bios->data[bitentry->offset + 5]; + bios->is_mobile = bios->feature_byte & FEATURE_MOBILE; + + if (bitentry->length < 15) { + NV_WARN(drm, "BIT i table not long enough for DAC load " + "detection comparison table\n"); + return -EINVAL; + } + + daccmpoffset = ROM16(bios->data[bitentry->offset + 13]); + + /* doesn't exist on g80 */ + if (!daccmpoffset) + return 0; + + /* + * The first value in the table, following the header, is the + * comparison value, the second entry is a comparison value for + * TV load detection. + */ + + dacver = bios->data[daccmpoffset]; + dacheaderlen = bios->data[daccmpoffset + 1]; + + if (dacver != 0x00 && dacver != 0x10) { + NV_WARN(drm, "DAC load detection comparison table version " + "%d.%d not known\n", dacver >> 4, dacver & 0xf); + return -ENOSYS; + } + + bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); + bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); + + return 0; +} + +static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the LVDS table segment that the bit entry points to. + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): LVDS strap xlate table pointer + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + + if (bitentry->length != 2) { + NV_ERROR(drm, "Do not understand BIT LVDS table\n"); + return -EINVAL; + } + + /* + * No idea if it's still called the LVDS manufacturer table, but + * the concept's close enough. 
+ */ + bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]); + + return 0; +} + +static int +parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios, + struct bit_entry *bitentry) +{ + /* + * offset + 2 (8 bits): number of options in an + * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set + * offset + 3 (16 bits): pointer to strap xlate table for RAM + * restrict option selection + * + * There's a bunch of bits in this table other than the RAM restrict + * stuff that we don't use - their use currently unknown + */ + + /* + * Older bios versions don't have a sufficiently long table for + * what we want + */ + if (bitentry->length < 0x5) + return 0; + + if (bitentry->version < 2) { + bios->ram_restrict_group_count = bios->data[bitentry->offset + 2]; + bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]); + } else { + bios->ram_restrict_group_count = bios->data[bitentry->offset + 0]; + bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]); + } + + return 0; +} + +static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) +{ + /* + * Parses the pointer to the TMDS table + * + * Starting at bitentry->offset: + * + * offset + 0 (16 bits): TMDS table pointer + * + * The TMDS table is typically found just before the DCB table, with a + * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being + * length?) + * + * At offset +7 is a pointer to a script, which I don't know how to + * run yet. + * At offset +9 is a pointer to another script, likewise + * Offset +11 has a pointer to a table where the first word is a pxclk + * frequency and the second word a pointer to a script, which should be + * run if the comparison pxclk frequency is less than the pxclk desired. + * This repeats for decreasing comparison frequencies + * Offset +13 has a pointer to a similar table + * The selection of table (and possibly +7/+9 script) is dictated by + * "or" from the DCB. + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint16_t tmdstableptr, script1, script2; + + if (bitentry->length != 2) { + NV_ERROR(drm, "Do not understand BIT TMDS table\n"); + return -EINVAL; + } + + tmdstableptr = ROM16(bios->data[bitentry->offset]); + if (!tmdstableptr) { + NV_ERROR(drm, "Pointer to TMDS table invalid\n"); + return -EINVAL; + } + + NV_INFO(drm, "TMDS table version %d.%d\n", + bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); + + /* nv50+ has v2.0, but we don't parse it atm */ + if (bios->data[tmdstableptr] != 0x11) + return -ENOSYS; + + /* + * These two scripts are odd: they don't seem to get run even when + * they are not stubbed. 
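+ * (the check below merely warns when the byte at either pointer is not
+ * the expected 'q' stub value)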
+ */ + script1 = ROM16(bios->data[tmdstableptr + 7]); + script2 = ROM16(bios->data[tmdstableptr + 9]); + if (bios->data[script1] != 'q' || bios->data[script2] != 'q') + NV_WARN(drm, "TMDS table script pointers not stubbed\n"); + + bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]); + bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]); + + return 0; +} + +struct bit_table { + const char id; + int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); +}; + +#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) + +int +bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + u8 entries, *entry; + + if (bios->type != NVBIOS_BIT) + return -ENODEV; + + entries = bios->data[bios->offset + 10]; + entry = &bios->data[bios->offset + 12]; + while (entries--) { + if (entry[0] == id) { + bit->id = entry[0]; + bit->version = entry[1]; + bit->length = ROM16(entry[2]); + bit->offset = ROM16(entry[4]); + bit->data = ROMPTR(dev, entry[4]); + return 0; + } + + entry += bios->data[bios->offset + 9]; + } + + return -ENOENT; +} + +static int +parse_bit_table(struct nvbios *bios, const uint16_t bitoffset, + struct bit_table *table) +{ + struct drm_device *dev = bios->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct bit_entry bitentry; + + if (bit_table(dev, table->id, &bitentry) == 0) + return table->parse_fn(dev, bios, &bitentry); + + NV_INFO(drm, "BIT table '%c' not found\n", table->id); + return -ENOSYS; +} + +static int +parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset) +{ + int ret; + + /* + * The only restriction on parsing order currently is having 'i' first + * for use of bios->*_version or bios->feature_byte while parsing; + * functions shouldn't be actually *doing* anything apart from pulling + * data from the image into the bios struct, thus no interdependencies + */ + ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i)); + if (ret) /* info? */ + return ret; + if (bios->major_version >= 0x60) /* g80+ */ + parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display)); + ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init)); + if (ret) + return ret; + parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? 
*/ + parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); + parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); + + return 0; +} + +static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset) +{ + /* + * Parses the BMP structure for useful things, but does not act on them + * + * offset + 5: BMP major version + * offset + 6: BMP minor version + * offset + 9: BMP feature byte + * offset + 10: BCD encoded BIOS version + * + * offset + 18: init script table pointer (for bios versions < 5.10h) + * offset + 20: extra init script table pointer (for bios + * versions < 5.10h) + * + * offset + 24: memory init table pointer (used on early bios versions) + * offset + 26: SDR memory sequencing setup data table + * offset + 28: DDR memory sequencing setup data table + * + * offset + 54: index of I2C CRTC pair to use for CRT output + * offset + 55: index of I2C CRTC pair to use for TV output + * offset + 56: index of I2C CRTC pair to use for flat panel output + * offset + 58: write CRTC index for I2C pair 0 + * offset + 59: read CRTC index for I2C pair 0 + * offset + 60: write CRTC index for I2C pair 1 + * offset + 61: read CRTC index for I2C pair 1 + * + * offset + 67: maximum internal PLL frequency (single stage PLL) + * offset + 71: minimum internal PLL frequency (single stage PLL) + * + * offset + 75: script table pointers, as described in + * parse_script_table_pointers + * + * offset + 89: TMDS single link output A table pointer + * offset + 91: TMDS single link output B table pointer + * offset + 95: LVDS single link output A table pointer + * offset + 105: flat panel timings table pointer + * offset + 107: flat panel strapping translation table pointer + * offset + 117: LVDS manufacturer panel config table pointer + * offset + 119: LVDS manufacturer strapping translation table pointer + * + * offset + 142: PLL limits table pointer + * + * offset + 156: minimum pixel clock for LVDS dual link + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor; + uint16_t bmplength; + uint16_t legacy_scripts_offset, legacy_i2c_offset; + + /* load needed defaults in case we can't parse this info */ + bios->digital_min_front_porch = 0x4b; + bios->fmaxvco = 256000; + bios->fminvco = 128000; + bios->fp.duallink_transition_clk = 90000; + + bmp_version_major = bmp[5]; + bmp_version_minor = bmp[6]; + + NV_INFO(drm, "BMP version %d.%d\n", + bmp_version_major, bmp_version_minor); + + /* + * Make sure that 0x36 is blank and can't be mistaken for a DCB + * pointer on early versions + */ + if (bmp_version_major < 5) + *(uint16_t *)&bios->data[0x36] = 0; + + /* + * Seems that the minor version was 1 for all major versions prior + * to 5. Version 6 could theoretically exist, but I suspect BIT + * happened instead. + */ + if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) { + NV_ERROR(drm, "You have an unsupported BMP version. " + "Please send in your bios\n"); + return -ENOSYS; + } + + if (bmp_version_major == 0) + /* nothing that's currently useful in this version */ + return 0; + else if (bmp_version_major == 1) + bmplength = 44; /* exact for 1.01 */ + else if (bmp_version_major == 2) + bmplength = 48; /* exact for 2.01 */ + else if (bmp_version_major == 3) + bmplength = 54; + /* guessed - mem init tables added in this version */ + else if (bmp_version_major == 4 || bmp_version_minor < 0x1) + /* don't know if 5.0 exists... 
*/ + bmplength = 62; + /* guessed - BMP I2C indices added in version 4*/ + else if (bmp_version_minor < 0x6) + bmplength = 67; /* exact for 5.01 */ + else if (bmp_version_minor < 0x10) + bmplength = 75; /* exact for 5.06 */ + else if (bmp_version_minor == 0x10) + bmplength = 89; /* exact for 5.10h */ + else if (bmp_version_minor < 0x14) + bmplength = 118; /* exact for 5.11h */ + else if (bmp_version_minor < 0x24) + /* + * Not sure of version where pll limits came in; + * certainly exist by 0x24 though. + */ + /* length not exact: this is long enough to get lvds members */ + bmplength = 123; + else if (bmp_version_minor < 0x27) + /* + * Length not exact: this is long enough to get pll limit + * member + */ + bmplength = 144; + else + /* + * Length not exact: this is long enough to get dual link + * transition clock. + */ + bmplength = 158; + + /* checksum */ + if (nv_cksum(bmp, 8)) { + NV_ERROR(drm, "Bad BMP checksum\n"); + return -EINVAL; + } + + /* + * Bit 4 seems to indicate either a mobile bios or a quadro card -- + * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl + * (not nv10gl), bit 5 that the flat panel tables are present, and + * bit 6 a tv bios. + */ + bios->feature_byte = bmp[9]; + + if (bmp_version_major < 5 || bmp_version_minor < 0x10) + bios->old_style_init = true; + legacy_scripts_offset = 18; + if (bmp_version_major < 2) + legacy_scripts_offset -= 4; + bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]); + bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]); + + if (bmp_version_major > 2) { /* appears in BMP 3 */ + bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]); + bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]); + bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]); + } + + legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */ + if (bmplength > 61) + legacy_i2c_offset = offset + 54; + bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; + bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; + bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; + + if (bmplength > 74) { + bios->fmaxvco = ROM32(bmp[67]); + bios->fminvco = ROM32(bmp[71]); + } + if (bmplength > 88) + parse_script_table_pointers(bios, offset + 75); + if (bmplength > 94) { + bios->tmds.output0_script_ptr = ROM16(bmp[89]); + bios->tmds.output1_script_ptr = ROM16(bmp[91]); + /* + * Never observed in use with lvds scripts, but is reused for + * 18/24 bit panel interface default for EDID equipped panels + * (if_is_24bit not set directly to avoid any oscillation). 
+ */ + bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]); + } + if (bmplength > 108) { + bios->fp.fptablepointer = ROM16(bmp[105]); + bios->fp.fpxlatetableptr = ROM16(bmp[107]); + bios->fp.xlatwidth = 1; + } + if (bmplength > 120) { + bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]); + bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]); + } +#if 0 + if (bmplength > 143) + bios->pll_limit_tbl_ptr = ROM16(bmp[142]); +#endif + + if (bmplength > 157) + bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10; + + return 0; +} + +static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) +{ + int i, j; + + for (i = 0; i <= (n - len); i++) { + for (j = 0; j < len; j++) + if (data[i + j] != str[j]) + break; + if (j == len) + return i; + } + + return 0; +} + +void * +olddcb_table(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + u8 *dcb = NULL; + + if (nv_device(drm->device)->card_type > NV_04) + dcb = ROMPTR(dev, drm->vbios.data[0x36]); + if (!dcb) { + NV_WARN(drm, "No DCB data found in VBIOS\n"); + return NULL; + } + + if (dcb[0] >= 0x41) { + NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]); + return NULL; + } else + if (dcb[0] >= 0x30) { + if (ROM32(dcb[6]) == 0x4edcbdcb) + return dcb; + } else + if (dcb[0] >= 0x20) { + if (ROM32(dcb[4]) == 0x4edcbdcb) + return dcb; + } else + if (dcb[0] >= 0x15) { + if (!memcmp(&dcb[-7], "DEV_REC", 7)) + return dcb; + } else { + /* + * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but + * always has the same single (crt) entry, even when tv-out + * present, so the conclusion is this version cannot really + * be used. + * + * v1.2 tables (some NV6/10, and NV15+) normally have the + * same 5 entries, which are not specific to the card and so + * no use. + * + * v1.2 does have an I2C table that read_dcb_i2c_table can + * handle, but cards exist (nv11 in #14821) with a bad i2c + * table pointer, so use the indices parsed in + * parse_bmp_structure. 
+ * + * v1.1 (NV5+, maybe some NV4) is entirely unhelpful + */ + NV_WARN(drm, "No useful DCB data in VBIOS\n"); + return NULL; + } + + NV_WARN(drm, "DCB header validation failed\n"); + return NULL; +} + +void * +olddcb_outp(struct drm_device *dev, u8 idx) +{ + u8 *dcb = olddcb_table(dev); + if (dcb && dcb[0] >= 0x30) { + if (idx < dcb[2]) + return dcb + dcb[1] + (idx * dcb[3]); + } else + if (dcb && dcb[0] >= 0x20) { + u8 *i2c = ROMPTR(dev, dcb[2]); + u8 *ent = dcb + 8 + (idx * 8); + if (i2c && ent < i2c) + return ent; + } else + if (dcb && dcb[0] >= 0x15) { + u8 *i2c = ROMPTR(dev, dcb[2]); + u8 *ent = dcb + 4 + (idx * 10); + if (i2c && ent < i2c) + return ent; + } + + return NULL; +} + +int +olddcb_outp_foreach(struct drm_device *dev, void *data, + int (*exec)(struct drm_device *, void *, int idx, u8 *outp)) +{ + int ret, idx = -1; + u8 *outp = NULL; + while ((outp = olddcb_outp(dev, ++idx))) { + if (ROM32(outp[0]) == 0x00000000) + break; /* seen on an NV11 with DCB v1.5 */ + if (ROM32(outp[0]) == 0xffffffff) + break; /* seen on an NV17 with DCB v2.0 */ + + if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED) + continue; + if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL) + break; + + ret = exec(dev, data, idx, outp); + if (ret) + return ret; + } + + return 0; +} + +u8 * +olddcb_conntab(struct drm_device *dev) +{ + u8 *dcb = olddcb_table(dev); + if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) { + u8 *conntab = ROMPTR(dev, dcb[0x14]); + if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40) + return conntab; + } + return NULL; +} + +u8 * +olddcb_conn(struct drm_device *dev, u8 idx) +{ + u8 *conntab = olddcb_conntab(dev); + if (conntab && idx < conntab[2]) + return conntab + conntab[1] + (idx * conntab[3]); + return NULL; +} + +static struct dcb_output *new_dcb_entry(struct dcb_table *dcb) +{ + struct dcb_output *entry = &dcb->entry[dcb->entries]; + + memset(entry, 0, sizeof(struct dcb_output)); + entry->index = dcb->entries++; + + return entry; +} + +static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c, + int heads, int or) +{ + struct dcb_output *entry = new_dcb_entry(dcb); + + entry->type = type; + entry->i2c_index = i2c; + entry->heads = heads; + if (type != DCB_OUTPUT_ANALOG) + entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ + entry->or = or; +} + +static bool +parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, + uint32_t conn, uint32_t conf, struct dcb_output *entry) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + entry->type = conn & 0xf; + entry->i2c_index = (conn >> 4) & 0xf; + entry->heads = (conn >> 8) & 0xf; + entry->connector = (conn >> 12) & 0xf; + entry->bus = (conn >> 16) & 0xf; + entry->location = (conn >> 20) & 0x3; + entry->or = (conn >> 24) & 0xf; + + switch (entry->type) { + case DCB_OUTPUT_ANALOG: + /* + * Although the rest of a CRT conf dword is usually + * zeros, mac biosen have stuff there so we must mask + */ + entry->crtconf.maxfreq = (dcb->version < 0x30) ? 
+ (conf & 0xffff) * 10 : + (conf & 0xff) * 10000; + break; + case DCB_OUTPUT_LVDS: + { + uint32_t mask; + if (conf & 0x1) + entry->lvdsconf.use_straps_for_mode = true; + if (dcb->version < 0x22) { + mask = ~0xd; + /* + * The laptop in bug 14567 lies and claims to not use + * straps when it does, so assume all DCB 2.0 laptops + * use straps, until a broken EDID using one is produced + */ + entry->lvdsconf.use_straps_for_mode = true; + /* + * Both 0x4 and 0x8 show up in v2.0 tables; assume they + * mean the same thing (probably wrong, but might work) + */ + if (conf & 0x4 || conf & 0x8) + entry->lvdsconf.use_power_scripts = true; + } else { + mask = ~0x7; + if (conf & 0x2) + entry->lvdsconf.use_acpi_for_edid = true; + if (conf & 0x4) + entry->lvdsconf.use_power_scripts = true; + entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; + } + if (conf & mask) { + /* + * Until we even try to use these on G8x, it's + * useless reporting unknown bits. They all are. + */ + if (dcb->version >= 0x40) + break; + + NV_ERROR(drm, "Unknown LVDS configuration bits, " + "please report\n"); + } + break; + } + case DCB_OUTPUT_TV: + { + if (dcb->version >= 0x30) + entry->tvconf.has_component_output = conf & (0x8 << 4); + else + entry->tvconf.has_component_output = false; + + break; + } + case DCB_OUTPUT_DP: + entry->dpconf.sor.link = (conf & 0x00000030) >> 4; + entry->extdev = (conf & 0x0000ff00) >> 8; + switch ((conf & 0x00e00000) >> 21) { + case 0: + entry->dpconf.link_bw = 162000; + break; + default: + entry->dpconf.link_bw = 270000; + break; + } + switch ((conf & 0x0f000000) >> 24) { + case 0xf: + entry->dpconf.link_nr = 4; + break; + case 0x3: + entry->dpconf.link_nr = 2; + break; + default: + entry->dpconf.link_nr = 1; + break; + } + break; + case DCB_OUTPUT_TMDS: + if (dcb->version >= 0x40) { + entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; + entry->extdev = (conf & 0x0000ff00) >> 8; + } + else if (dcb->version >= 0x30) + entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; + else if (dcb->version >= 0x22) + entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; + + break; + case DCB_OUTPUT_EOL: + /* weird g80 mobile type that "nv" treats as a terminator */ + dcb->entries--; + return false; + default: + break; + } + + if (dcb->version < 0x40) { + /* Normal entries consist of a single bit, but dual link has + * the next most significant bit set too + */ + entry->duallink_possible = + ((1 << (ffs(entry->or) - 1)) * 3 == entry->or); + } else { + entry->duallink_possible = (entry->sorconf.link == 3); + } + + /* unsure what DCB version introduces this, 3.0? 
*/ + if (conf & 0x100000) + entry->i2c_upper_default = true; + + return true; +} + +static bool +parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, + uint32_t conn, uint32_t conf, struct dcb_output *entry) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + switch (conn & 0x0000000f) { + case 0: + entry->type = DCB_OUTPUT_ANALOG; + break; + case 1: + entry->type = DCB_OUTPUT_TV; + break; + case 2: + case 4: + if (conn & 0x10) + entry->type = DCB_OUTPUT_LVDS; + else + entry->type = DCB_OUTPUT_TMDS; + break; + case 3: + entry->type = DCB_OUTPUT_LVDS; + break; + default: + NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f); + return false; + } + + entry->i2c_index = (conn & 0x0003c000) >> 14; + entry->heads = ((conn & 0x001c0000) >> 18) + 1; + entry->or = entry->heads; /* same as heads, hopefully safe enough */ + entry->location = (conn & 0x01e00000) >> 21; + entry->bus = (conn & 0x0e000000) >> 25; + entry->duallink_possible = false; + + switch (entry->type) { + case DCB_OUTPUT_ANALOG: + entry->crtconf.maxfreq = (conf & 0xffff) * 10; + break; + case DCB_OUTPUT_TV: + entry->tvconf.has_component_output = false; + break; + case DCB_OUTPUT_LVDS: + if ((conn & 0x00003f00) >> 8 != 0x10) + entry->lvdsconf.use_straps_for_mode = true; + entry->lvdsconf.use_power_scripts = true; + break; + default: + break; + } + + return true; +} + +static +void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) +{ + /* + * DCB v2.0 lists each output combination separately. + * Here we merge compatible entries to have fewer outputs, with + * more options + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + int i, newentries = 0; + + for (i = 0; i < dcb->entries; i++) { + struct dcb_output *ient = &dcb->entry[i]; + int j; + + for (j = i + 1; j < dcb->entries; j++) { + struct dcb_output *jent = &dcb->entry[j]; + + if (jent->type == 100) /* already merged entry */ + continue; + + /* merge heads field when all other fields the same */ + if (jent->i2c_index == ient->i2c_index && + jent->type == ient->type && + jent->location == ient->location && + jent->or == ient->or) { + NV_INFO(drm, "Merging DCB entries %d and %d\n", + i, j); + ient->heads |= jent->heads; + jent->type = 100; /* dummy value */ + } + } + } + + /* Compact entries merged into others out of dcb */ + for (i = 0; i < dcb->entries; i++) { + if (dcb->entry[i].type == 100) + continue; + + if (newentries != i) { + dcb->entry[newentries] = dcb->entry[i]; + dcb->entry[newentries].index = newentries; + } + newentries++; + } + + dcb->entries = newentries; +} + +static bool +apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcb = &drm->vbios.dcb; + + /* Dell Precision M6300 + * DCB entry 2: 02025312 00000010 + * DCB entry 3: 02026312 00000020 + * + * Identical, except apparently a different connector on a + * different SOR link. Not a clue how we're supposed to know + * which one is in use if it even shares an i2c line... + * + * Ignore the connector on the second SOR link to prevent + * nasty problems until this is sorted (assuming it's not a + * VBIOS bug). 
+ */ + if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) { + if (*conn == 0x02026312 && *conf == 0x00000020) + return false; + } + + /* GeForce3 Ti 200 + * + * DCB reports an LVDS output that should be TMDS: + * DCB entry 1: f2005014 ffffffff + */ + if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) { + if (*conn == 0xf2005014 && *conf == 0xffffffff) { + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1); + return false; + } + } + + /* XFX GT-240X-YA + * + * So many things wrong here, replace the entire encoder table.. + */ + if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) { + if (idx == 0) { + *conn = 0x02001300; /* VGA, connector 1 */ + *conf = 0x00000028; + } else + if (idx == 1) { + *conn = 0x01010312; /* DVI, connector 0 */ + *conf = 0x00020030; + } else + if (idx == 2) { + *conn = 0x01010310; /* VGA, connector 0 */ + *conf = 0x00000028; + } else + if (idx == 3) { + *conn = 0x02022362; /* HDMI, connector 2 */ + *conf = 0x00020010; + } else { + *conn = 0x0000000e; /* EOL */ + *conf = 0x00000000; + } + } + + /* Some other twisted XFX board (rhbz#694914) + * + * The DVI/VGA encoder combo that's supposed to represent the + * DVI-I connector actually point at two different ones, and + * the HDMI connector ends up paired with the VGA instead. + * + * Connector table is missing anything for VGA at all, pointing it + * an invalid conntab entry 2 so we figure it out ourself. + */ + if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) { + if (idx == 0) { + *conn = 0x02002300; /* VGA, connector 2 */ + *conf = 0x00000028; + } else + if (idx == 1) { + *conn = 0x01010312; /* DVI, connector 0 */ + *conf = 0x00020030; + } else + if (idx == 2) { + *conn = 0x04020310; /* VGA, connector 0 */ + *conf = 0x00000028; + } else + if (idx == 3) { + *conn = 0x02021322; /* HDMI, connector 1 */ + *conf = 0x00020010; + } else { + *conn = 0x0000000e; /* EOL */ + *conf = 0x00000000; + } + } + + /* fdo#50830: connector indices for VGA and DVI-I are backwards */ + if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) { + if (idx == 0 && *conn == 0x02000300) + *conn = 0x02011300; + else + if (idx == 1 && *conn == 0x04011310) + *conn = 0x04000310; + else + if (idx == 2 && *conn == 0x02011312) + *conn = 0x02000312; + } + + return true; +} + +static void +fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios) +{ + struct dcb_table *dcb = &bios->dcb; + int all_heads = (nv_two_heads(dev) ? 3 : 1); + +#ifdef __powerpc__ + /* Apple iMac G4 NV17 */ + if (of_machine_is_compatible("PowerMac4,5")) { + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1); + fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2); + return; + } +#endif + + /* Make up some sane defaults */ + fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, + bios->legacy.i2c_indices.crt, 1, 1); + + if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) + fabricate_dcb_output(dcb, DCB_OUTPUT_TV, + bios->legacy.i2c_indices.tv, + all_heads, 0); + + else if (bios->tmds.output0_script_ptr || + bios->tmds.output1_script_ptr) + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, + bios->legacy.i2c_indices.panel, + all_heads, 1); +} + +static int +parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcb = &drm->vbios.dcb; + u32 conf = (dcb->version >= 0x20) ? 
ROM32(outp[4]) : ROM32(outp[6]); + u32 conn = ROM32(outp[0]); + bool ret; + + if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) { + struct dcb_output *entry = new_dcb_entry(dcb); + + NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf); + + if (dcb->version >= 0x20) + ret = parse_dcb20_entry(dev, dcb, conn, conf, entry); + else + ret = parse_dcb15_entry(dev, dcb, conn, conf, entry); + if (!ret) + return 1; /* stop parsing */ + + /* Ignore the I2C index for on-chip TV-out, as there + * are cards with bogus values (nv31m in bug 23212), + * and it's otherwise useless. + */ + if (entry->type == DCB_OUTPUT_TV && + entry->location == DCB_LOC_ON_CHIP) + entry->i2c_index = 0x0f; + } + + return 0; +} + +static void +dcb_fake_connectors(struct nvbios *bios) +{ + struct dcb_table *dcbt = &bios->dcb; + u8 map[16] = { }; + int i, idx = 0; + + /* heuristic: if we ever get a non-zero connector field, assume + * that all the indices are valid and we don't need fake them. + * + * and, as usual, a blacklist of boards with bad bios data.. + */ + if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) { + for (i = 0; i < dcbt->entries; i++) { + if (dcbt->entry[i].connector) + return; + } + } + + /* no useful connector info available, we need to make it up + * ourselves. the rule here is: anything on the same i2c bus + * is considered to be on the same connector. any output + * without an associated i2c bus is assigned its own unique + * connector index. + */ + for (i = 0; i < dcbt->entries; i++) { + u8 i2c = dcbt->entry[i].i2c_index; + if (i2c == 0x0f) { + dcbt->entry[i].connector = idx++; + } else { + if (!map[i2c]) + map[i2c] = ++idx; + dcbt->entry[i].connector = map[i2c] - 1; + } + } + + /* if we created more than one connector, destroy the connector + * table - just in case it has random, rather than stub, entries. 
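+	 * Zeroing conntab[0] (the table's version byte) makes olddcb_conntab() reject it from then on.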
+ */ + if (i > 1) { + u8 *conntab = olddcb_conntab(bios->dev); + if (conntab) + conntab[0] = 0x00; + } +} + +static int +parse_dcb_table(struct drm_device *dev, struct nvbios *bios) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcb = &bios->dcb; + u8 *dcbt, *conn; + int idx; + + dcbt = olddcb_table(dev); + if (!dcbt) { + /* handle pre-DCB boards */ + if (bios->type == NVBIOS_BMP) { + fabricate_dcb_encoder_table(dev, bios); + return 0; + } + + return -EINVAL; + } + + NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf); + + dcb->version = dcbt[0]; + olddcb_outp_foreach(dev, NULL, parse_dcb_entry); + + /* + * apart for v2.1+ not being known for requiring merging, this + * guarantees dcbent->index is the index of the entry in the rom image + */ + if (dcb->version < 0x21) + merge_like_dcb_entries(dev, dcb); + + if (!dcb->entries) + return -ENXIO; + + /* dump connector table entries to log, if any exist */ + idx = -1; + while ((conn = olddcb_conn(dev, ++idx))) { + if (conn[0] != 0xff) { + NV_INFO(drm, "DCB conn %02d: ", idx); + if (olddcb_conntab(dev)[3] < 4) + pr_cont("%04x\n", ROM16(conn[0])); + else + pr_cont("%08x\n", ROM32(conn[0])); + } + } + dcb_fake_connectors(bios); + return 0; +} + +static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry) +{ + /* + * The header following the "HWSQ" signature has the number of entries, + * and the entry size + * + * An entry consists of a dword to write to the sequencer control reg + * (0x00001304), followed by the ucode bytes, written sequentially, + * starting at reg 0x00001400 + */ + + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + uint8_t bytes_to_write; + uint16_t hwsq_entry_offset; + int i; + + if (bios->data[hwsq_offset] <= entry) { + NV_ERROR(drm, "Too few entries in HW sequencer table for " + "requested entry\n"); + return -ENOENT; + } + + bytes_to_write = bios->data[hwsq_offset + 1]; + + if (bytes_to_write != 36) { + NV_ERROR(drm, "Unknown HW sequencer entry size\n"); + return -EINVAL; + } + + NV_INFO(drm, "Loading NV17 power sequencing microcode\n"); + + hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; + + /* set sequencer control */ + nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); + bytes_to_write -= 4; + + /* write ucode */ + for (i = 0; i < bytes_to_write; i += 4) + nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); + + /* twiddle NV_PBUS_DEBUG_4 */ + nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18); + + return 0; +} + +static int load_nv17_hw_sequencer_ucode(struct drm_device *dev, + struct nvbios *bios) +{ + /* + * BMP based cards, from NV17, need a microcode loading to correctly + * control the GPIO etc for LVDS panels + * + * BIT based cards seem to do this directly in the init scripts + * + * The microcode entries are found by the "HWSQ" signature. + */ + + const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' }; + const int sz = sizeof(hwsq_signature); + int hwsq_offset; + + hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz); + if (!hwsq_offset) + return 0; + + /* always use entry 0? 
*/ + return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0); +} + +uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + const uint8_t edid_sig[] = { + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + uint16_t offset = 0; + uint16_t newoffset; + int searchlen = NV_PROM_SIZE; + + if (bios->fp.edid) + return bios->fp.edid; + + while (searchlen) { + newoffset = findstr(&bios->data[offset], searchlen, + edid_sig, 8); + if (!newoffset) + return NULL; + offset += newoffset; + if (!nv_cksum(&bios->data[offset], EDID1_LEN)) + break; + + searchlen -= offset; + offset++; + } + + NV_INFO(drm, "Found EDID in BIOS\n"); + + return bios->fp.edid = &bios->data[offset]; +} + +static bool NVInitVBIOS(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bios *bios = nouveau_bios(drm->device); + struct nvbios *legacy = &drm->vbios; + + memset(legacy, 0, sizeof(struct nvbios)); + spin_lock_init(&legacy->lock); + legacy->dev = dev; + + legacy->data = bios->data; + legacy->length = bios->size; + legacy->major_version = bios->version.major; + legacy->chip_version = bios->version.chip; + if (bios->bit_offset) { + legacy->type = NVBIOS_BIT; + legacy->offset = bios->bit_offset; + return !parse_bit_structure(legacy, legacy->offset + 6); + } else + if (bios->bmp_offset) { + legacy->type = NVBIOS_BMP; + legacy->offset = bios->bmp_offset; + return !parse_bmp_structure(dev, legacy, legacy->offset); + } + + return false; +} + +int +nouveau_run_vbios_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + int ret = 0; + + /* Reset the BIOS head to 0. */ + bios->state.crtchead = 0; + + if (bios->major_version < 5) /* BMP only */ + load_nv17_hw_sequencer_ucode(dev, bios); + + if (bios->execute) { + bios->fp.last_script_invoc = 0; + bios->fp.lvds_init_run = false; + } + + return ret; +} + +static bool +nouveau_bios_posted(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + unsigned htotal; + + if (nv_device(drm->device)->card_type >= NV_50) { + if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && + NVReadVgaCrtc(dev, 0, 0x1a) == 0) + return false; + return true; + } + + htotal = NVReadVgaCrtc(dev, 0, 0x06); + htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; + htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; + htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; + htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; + + return (htotal != 0); +} + +int +nouveau_bios_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + int ret; + + if (!NVInitVBIOS(dev)) + return -ENODEV; + + ret = parse_dcb_table(dev, bios); + if (ret) + return ret; + + if (!bios->major_version) /* we don't run version 0 bios */ + return 0; + + /* init script execution disabled */ + bios->execute = false; + + /* ... 
unless card isn't POSTed already */ + if (!nouveau_bios_posted(dev)) { + NV_INFO(drm, "Adaptor not initialised, " + "running VBIOS init tables.\n"); + bios->execute = true; + } + + ret = nouveau_run_vbios_init(dev); + if (ret) + return ret; + + /* feature_byte on BMP is poor, but init always sets CR4B */ + if (bios->major_version < 5) + bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40; + + /* all BIT systems need p_f_m_t for digital_min_front_porch */ + if (bios->is_mobile || bios->major_version >= 5) + ret = parse_fp_mode_table(dev, bios); + + /* allow subsequent scripts to execute */ + bios->execute = true; + + return 0; +} + +void +nouveau_bios_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h new file mode 100644 index 0000000..0067586 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -0,0 +1,179 @@ +/* + * Copyright 2007-2008 Nouveau Project + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NOUVEAU_DISPBIOS_H__ +#define __NOUVEAU_DISPBIOS_H__ + +#define DCB_MAX_NUM_ENTRIES 16 +#define DCB_MAX_NUM_I2C_ENTRIES 16 +#define DCB_MAX_NUM_GPIO_ENTRIES 32 +#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16 + +#define DCB_LOC_ON_CHIP 0 + +#define ROM16(x) le16_to_cpu(*(u16 *)&(x)) +#define ROM32(x) le32_to_cpu(*(u32 *)&(x)) +#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); }) +#define ROM64(x) le64_to_cpu(*(u64 *)&(x)) +#define ROMPTR(d,x) ({ \ + struct nouveau_drm *drm = nouveau_drm((d)); \ + ROM16(x) ? 
&drm->vbios.data[ROM16(x)] : NULL; \ +}) + +struct bit_entry { + uint8_t id; + uint8_t version; + uint16_t length; + uint16_t offset; + uint8_t *data; +}; + +int bit_table(struct drm_device *, u8 id, struct bit_entry *); + +#include <subdev/bios/dcb.h> +#include <subdev/bios/conn.h> + +struct dcb_table { + uint8_t version; + int entries; + struct dcb_output entry[DCB_MAX_NUM_ENTRIES]; +}; + +enum nouveau_or { + DCB_OUTPUT_A = (1 << 0), + DCB_OUTPUT_B = (1 << 1), + DCB_OUTPUT_C = (1 << 2) +}; + +enum LVDS_script { + /* Order *does* matter here */ + LVDS_INIT = 1, + LVDS_RESET, + LVDS_BACKLIGHT_ON, + LVDS_BACKLIGHT_OFF, + LVDS_PANEL_ON, + LVDS_PANEL_OFF +}; + +struct nvbios { + struct drm_device *dev; + enum { + NVBIOS_BMP, + NVBIOS_BIT + } type; + uint16_t offset; + uint32_t length; + uint8_t *data; + + uint8_t chip_version; + + uint32_t dactestval; + uint32_t tvdactestval; + uint8_t digital_min_front_porch; + bool fp_no_ddc; + + spinlock_t lock; + + bool execute; + + uint8_t major_version; + uint8_t feature_byte; + bool is_mobile; + + uint32_t fmaxvco, fminvco; + + bool old_style_init; + uint16_t init_script_tbls_ptr; + uint16_t extra_init_script_tbl_ptr; + + uint16_t ram_restrict_tbl_ptr; + uint8_t ram_restrict_group_count; + + struct dcb_table dcb; + + struct { + int crtchead; + } state; + + struct { + uint16_t fptablepointer; /* also used by tmds */ + uint16_t fpxlatetableptr; + int xlatwidth; + uint16_t lvdsmanufacturerpointer; + uint16_t fpxlatemanufacturertableptr; + uint16_t mode_ptr; + uint16_t xlated_entry; + bool power_off_for_reset; + bool reset_after_pclk_change; + bool dual_link; + bool link_c_increment; + bool if_is_24bit; + int duallink_transition_clk; + uint8_t strapless_is_24bit; + uint8_t *edid; + + /* will need resetting after suspend */ + int last_script_invoc; + bool lvds_init_run; + } fp; + + struct { + uint16_t output0_script_ptr; + uint16_t output1_script_ptr; + } tmds; + + struct { + uint16_t mem_init_tbl_ptr; + uint16_t sdr_seq_tbl_ptr; + uint16_t ddr_seq_tbl_ptr; + + struct { + uint8_t crt, tv, panel; + } i2c_indices; + + uint16_t lvds_single_a_script_ptr; + } legacy; +}; + +void *olddcb_table(struct drm_device *); +void *olddcb_outp(struct drm_device *, u8 idx); +int olddcb_outp_foreach(struct drm_device *, void *data, + int (*)(struct drm_device *, void *, int idx, u8 *outp)); +u8 *olddcb_conntab(struct drm_device *); +u8 *olddcb_conn(struct drm_device *, u8 idx); + +int nouveau_bios_init(struct drm_device *); +void nouveau_bios_takedown(struct drm_device *dev); +int nouveau_run_vbios_init(struct drm_device *); +struct dcb_connector_table_entry * +nouveau_bios_connector_entry(struct drm_device *, int index); +bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); +uint8_t *nouveau_bios_embedded_edid(struct drm_device *); +int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, + bool *dl, bool *if_is_24bit); +int run_tmds_table(struct drm_device *, struct dcb_output *, + int head, int pxclk); +int call_lvds_script(struct drm_device *, struct dcb_output *, int head, + enum LVDS_script, int pxclk); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c new file mode 100644 index 0000000..6b5814f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -0,0 +1,1567 @@ +/* + * Copyright 2007 Dave Airlied + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* + * Authors: Dave Airlied <airlied@linux.ie> + * Ben Skeggs <darktama@iinet.net.au> + * Jeremy Kolb <jkolb@brandeis.edu> + */ + +#include <core/engine.h> +#include <linux/swiotlb.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/bar.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" + +#include "nouveau_bo.h" +#include "nouveau_ttm.h" +#include "nouveau_gem.h" + +/* + * NV10-NV40 tiling helpers + */ + +static void +nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, + u32 addr, u32 size, u32 pitch, u32 flags) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + int i = reg - drm->tile.reg; + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb_tile *tile = &pfb->tile.region[i]; + struct nouveau_engine *engine; + + nouveau_fence_unref(®->fence); + + if (tile->pitch) + pfb->tile.fini(pfb, i, tile); + + if (pitch) + pfb->tile.init(pfb, i, addr, size, pitch, flags, tile); + + pfb->tile.prog(pfb, i, tile); + + if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR))) + engine->tile_prog(engine, i); + if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG))) + engine->tile_prog(engine, i); +} + +static struct nouveau_drm_tile * +nv10_bo_get_tile_region(struct drm_device *dev, int i) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_drm_tile *tile = &drm->tile.reg[i]; + + spin_lock(&drm->tile.lock); + + if (!tile->used && + (!tile->fence || nouveau_fence_done(tile->fence))) + tile->used = true; + else + tile = NULL; + + spin_unlock(&drm->tile.lock); + return tile; +} + +static void +nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, + struct nouveau_fence *fence) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (tile) { + spin_lock(&drm->tile.lock); + if (fence) { + /* Mark it as pending. 
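+	 * nv10_bo_get_tile_region() will then skip this region until the fence has signalled.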
*/ + tile->fence = fence; + nouveau_fence_ref(fence); + } + + tile->used = false; + spin_unlock(&drm->tile.lock); + } +} + +static struct nouveau_drm_tile * +nv10_bo_set_tiling(struct drm_device *dev, u32 addr, + u32 size, u32 pitch, u32 flags) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_drm_tile *tile, *found = NULL; + int i; + + for (i = 0; i < pfb->tile.regions; i++) { + tile = nv10_bo_get_tile_region(dev, i); + + if (pitch && !found) { + found = tile; + continue; + + } else if (tile && pfb->tile.region[i].pitch) { + /* Kill an unused tile region. */ + nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); + } + + nv10_bo_put_tile_region(dev, tile, NULL); + } + + if (found) + nv10_bo_update_tile_region(dev, found, addr, size, + pitch, flags); + return found; +} + +static void +nouveau_bo_del_ttm(struct ttm_buffer_object *bo) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct drm_device *dev = drm->dev; + struct nouveau_bo *nvbo = nouveau_bo(bo); + + if (unlikely(nvbo->gem)) + DRM_ERROR("bo %p still attached to GEM object\n", bo); + nv10_bo_put_tile_region(dev, nvbo->tile, NULL); + kfree(nvbo); +} + +static void +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, + int *align, int *size) +{ + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct nouveau_device *device = nv_device(drm->device); + + if (device->card_type < NV_50) { + if (nvbo->tile_mode) { + if (device->chipset >= 0x40) { + *align = 65536; + *size = roundup(*size, 64 * nvbo->tile_mode); + + } else if (device->chipset >= 0x30) { + *align = 32768; + *size = roundup(*size, 64 * nvbo->tile_mode); + + } else if (device->chipset >= 0x20) { + *align = 16384; + *size = roundup(*size, 64 * nvbo->tile_mode); + + } else if (device->chipset >= 0x10) { + *align = 16384; + *size = roundup(*size, 32 * nvbo->tile_mode); + } + } + } else { + *size = roundup(*size, (1 << nvbo->page_shift)); + *align = max((1 << nvbo->page_shift), *align); + } + + *size = roundup(*size, PAGE_SIZE); +} + +int +nouveau_bo_new(struct drm_device *dev, int size, int align, + uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, + struct sg_table *sg, + struct nouveau_bo **pnvbo) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bo *nvbo; + size_t acc_size; + int ret; + int type = ttm_bo_type_device; + + if (sg) + type = ttm_bo_type_sg; + + nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); + if (!nvbo) + return -ENOMEM; + INIT_LIST_HEAD(&nvbo->head); + INIT_LIST_HEAD(&nvbo->entry); + INIT_LIST_HEAD(&nvbo->vma_list); + nvbo->tile_mode = tile_mode; + nvbo->tile_flags = tile_flags; + nvbo->bo.bdev = &drm->ttm.bdev; + + nvbo->page_shift = 12; + if (drm->client.base.vm) { + if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) + nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift; + } + + nouveau_bo_fixup_align(nvbo, flags, &align, &size); + nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; + nouveau_bo_placement_set(nvbo, flags, 0); + + acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, + sizeof(struct nouveau_bo)); + + ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, + type, &nvbo->placement, + align >> PAGE_SHIFT, false, NULL, acc_size, sg, + nouveau_bo_del_ttm); + if (ret) { + /* ttm will call nouveau_bo_del_ttm if it fails.. 
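+	 * so the nvbo allocation must not be freed again on this path.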
*/ + return ret; + } + + *pnvbo = nvbo; + return 0; +} + +static void +set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) +{ + *n = 0; + + if (type & TTM_PL_FLAG_VRAM) + pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; + if (type & TTM_PL_FLAG_TT) + pl[(*n)++] = TTM_PL_FLAG_TT | flags; + if (type & TTM_PL_FLAG_SYSTEM) + pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; +} + +static void +set_placement_range(struct nouveau_bo *nvbo, uint32_t type) +{ + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + u32 vram_pages = pfb->ram.size >> PAGE_SHIFT; + + if (nv_device(drm->device)->card_type == NV_10 && + nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && + nvbo->bo.mem.num_pages < vram_pages / 4) { + /* + * Make sure that the color and depth buffers are handled + * by independent memory controller units. Up to a 9x + * speed up when alpha-blending and depth-test are enabled + * at the same time. + */ + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { + nvbo->placement.fpfn = vram_pages / 2; + nvbo->placement.lpfn = ~0; + } else { + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = vram_pages / 2; + } + } +} + +void +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) +{ + struct ttm_placement *pl = &nvbo->placement; + uint32_t flags = TTM_PL_MASK_CACHING | + (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); + + pl->placement = nvbo->placements; + set_placement_list(nvbo->placements, &pl->num_placement, + type, flags); + + pl->busy_placement = nvbo->busy_placements; + set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, + type | busy, flags); + + set_placement_range(nvbo, type); +} + +int +nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) +{ + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct ttm_buffer_object *bo = &nvbo->bo; + int ret; + + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (ret) + goto out; + + if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { + NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, + 1 << bo->mem.mem_type, memtype); + ret = -EINVAL; + goto out; + } + + if (nvbo->pin_refcnt++) + goto out; + + nouveau_bo_placement_set(nvbo, memtype, 0); + + ret = nouveau_bo_validate(nvbo, false, false); + if (ret == 0) { + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + drm->gem.vram_available -= bo->mem.size; + break; + case TTM_PL_TT: + drm->gem.gart_available -= bo->mem.size; + break; + default: + break; + } + } +out: + ttm_bo_unreserve(bo); + return ret; +} + +int +nouveau_bo_unpin(struct nouveau_bo *nvbo) +{ + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct ttm_buffer_object *bo = &nvbo->bo; + int ret; + + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (ret) + return ret; + + if (--nvbo->pin_refcnt) + goto out; + + nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); + + ret = nouveau_bo_validate(nvbo, false, false); + if (ret == 0) { + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + drm->gem.vram_available += bo->mem.size; + break; + case TTM_PL_TT: + drm->gem.gart_available += bo->mem.size; + break; + default: + break; + } + } + +out: + ttm_bo_unreserve(bo); + return ret; +} + +int +nouveau_bo_map(struct nouveau_bo *nvbo) +{ + int ret; + + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + if (ret) + return ret; + + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); + ttm_bo_unreserve(&nvbo->bo); + return ret; +} + +void +nouveau_bo_unmap(struct nouveau_bo *nvbo) +{ 
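+	/* NULL is tolerated here so callers don't have to check before unmapping */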
+ if (nvbo) + ttm_bo_kunmap(&nvbo->kmap); +} + +int +nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, + bool no_wait_gpu) +{ + int ret; + + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, + interruptible, no_wait_gpu); + if (ret) + return ret; + + return 0; +} + +u16 +nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) +{ + bool is_iomem; + u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + return ioread16_native((void __force __iomem *)mem); + else + return *mem; +} + +void +nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) +{ + bool is_iomem; + u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + iowrite16_native(val, (void __force __iomem *)mem); + else + *mem = val; +} + +u32 +nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) +{ + bool is_iomem; + u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + return ioread32_native((void __force __iomem *)mem); + else + return *mem; +} + +void +nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) +{ + bool is_iomem; + u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); + mem = &mem[index]; + if (is_iomem) + iowrite32_native(val, (void __force __iomem *)mem); + else + *mem = val; +} + +static struct ttm_tt * +nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, + uint32_t page_flags, struct page *dummy_read) +{ +#if __OS_HAS_AGP + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct drm_device *dev = drm->dev; + + if (drm->agp.stat == ENABLED) { + return ttm_agp_tt_create(bdev, dev->agp->bridge, size, + page_flags, dummy_read); + } +#endif + + return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read); +} + +static int +nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) +{ + /* We'll do this from user space. 
*/ + return 0; +} + +static int +nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + struct nouveau_drm *drm = nouveau_bdev(bdev); + + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + if (nv_device(drm->device)->card_type >= NV_50) { + man->func = &nouveau_vram_manager; + man->io_reserve_fastpath = false; + man->use_io_reserve_lru = true; + } else { + man->func = &ttm_bo_manager_func; + } + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + case TTM_PL_TT: + if (nv_device(drm->device)->card_type >= NV_50) + man->func = &nouveau_gart_manager; + else + if (drm->agp.stat != ENABLED) + man->func = &nv04_gart_manager; + else + man->func = &ttm_bo_manager_func; + + if (drm->agp.stat == ENABLED) { + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + } else { + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | + TTM_MEMTYPE_FLAG_CMA; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + } + + break; + default: + return -EINVAL; + } + return 0; +} + +static void +nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) +{ + struct nouveau_bo *nvbo = nouveau_bo(bo); + + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, + TTM_PL_FLAG_SYSTEM); + break; + default: + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); + break; + } + + *pl = nvbo->placement; +} + + +/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access + * TTM_PL_{VRAM,TT} directly. 
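+ *
+ * nouveau_bo_move_init() further down walks a table of per-generation copy
+ * methods, creates the first object class the device accepts, runs its init
+ * hook on the copy (or general) channel and keeps the exec hook in
+ * drm->ttm.move; if nothing binds, buffer moves fall back to CPU copies.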
+ */ + +static int +nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, + struct nouveau_bo *nvbo, bool evict, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + struct nouveau_fence *fence = NULL; + int ret; + + ret = nouveau_fence_new(chan, false, &fence); + if (ret) + return ret; + + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, + no_wait_gpu, new_mem); + nouveau_fence_unref(&fence); + return ret; +} + +static int +nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 2); + if (ret == 0) { + BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + FIRE_RING (chan); + } + return ret; +} + +static int +nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + int ret = RING_SPACE(chan, 10); + if (ret == 0) { + BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); + OUT_RING (chan, upper_32_bits(node->vma[0].offset)); + OUT_RING (chan, lower_32_bits(node->vma[0].offset)); + OUT_RING (chan, upper_32_bits(node->vma[1].offset)); + OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, new_mem->num_pages); + BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); + } + return ret; +} + +static int +nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 2); + if (ret == 0) { + BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + } + return ret; +} + +static int +nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + u32 page_count = new_mem->num_pages; + int ret; + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 8191) ? 8191 : page_count; + + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + + BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, line_count); + BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); + OUT_RING (chan, 0x00000110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); + } + + return 0; +} + +static int +nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + u32 page_count = new_mem->num_pages; + int ret; + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 
2047 : page_count; + + ret = RING_SPACE(chan, 12); + if (ret) + return ret; + + BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, PAGE_SIZE); /* src_pitch */ + OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING (chan, PAGE_SIZE); /* line_length */ + OUT_RING (chan, line_count); + BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); + OUT_RING (chan, 0x00100110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); + } + + return 0; +} + +static int +nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + u32 page_count = new_mem->num_pages; + int ret; + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 8191) ? 8191 : page_count; + + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, line_count); + BEGIN_NV04(chan, NvSubCopy, 0x0300, 1); + OUT_RING (chan, 0x00000110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); + } + + return 0; +} + +static int +nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + int ret = RING_SPACE(chan, 7); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); + OUT_RING (chan, upper_32_bits(node->vma[0].offset)); + OUT_RING (chan, lower_32_bits(node->vma[0].offset)); + OUT_RING (chan, upper_32_bits(node->vma[1].offset)); + OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, 0x00000000 /* COPY */); + OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); + } + return ret; +} + +static int +nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + int ret = RING_SPACE(chan, 7); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); + OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); + OUT_RING (chan, upper_32_bits(node->vma[0].offset)); + OUT_RING (chan, lower_32_bits(node->vma[0].offset)); + OUT_RING (chan, upper_32_bits(node->vma[1].offset)); + OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); + } + return ret; +} + +static int +nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 6); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); + OUT_RING (chan, NvNotify0); + OUT_RING (chan, NvDmaFB); + OUT_RING (chan, NvDmaFB); + } + + return ret; +} + +static int +nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct 
ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + struct nouveau_bo *nvbo = nouveau_bo(bo); + u64 length = (new_mem->num_pages << PAGE_SHIFT); + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + int ret; + + while (length) { + u32 amount, stride, height; + + amount = min(length, (u64)(4 * 1024 * 1024)); + stride = 16 * 4; + height = amount / stride; + + if (old_mem->mem_type == TTM_PL_VRAM && + nouveau_bo_tile_layout(nvbo)) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + } else { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); + OUT_RING (chan, 1); + } + if (new_mem->mem_type == TTM_PL_VRAM && + nouveau_bo_tile_layout(nvbo)) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + } else { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); + OUT_RING (chan, 1); + } + + ret = RING_SPACE(chan, 14); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, stride); + OUT_RING (chan, stride); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 0x00000101); + OUT_RING (chan, 0x00000000); + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (chan, 0); + + length -= amount; + src_offset += amount; + dst_offset += amount; + } + + return 0; +} + +static int +nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 4); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); + OUT_RING (chan, NvNotify0); + } + + return ret; +} + +static inline uint32_t +nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, + struct nouveau_channel *chan, struct ttm_mem_reg *mem) +{ + if (mem->mem_type == TTM_PL_TT) + return NvDmaTT; + return NvDmaFB; +} + +static int +nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + u32 src_offset = old_mem->start << PAGE_SHIFT; + u32 dst_offset = new_mem->start << PAGE_SHIFT; + u32 page_count = new_mem->num_pages; + int ret; + + ret = RING_SPACE(chan, 3); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 
2047 : page_count; + + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + OUT_RING (chan, src_offset); + OUT_RING (chan, dst_offset); + OUT_RING (chan, PAGE_SIZE); /* src_pitch */ + OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING (chan, PAGE_SIZE); /* line_length */ + OUT_RING (chan, line_count); + OUT_RING (chan, 0x00000101); + OUT_RING (chan, 0x00000000); + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (chan, 0); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); + } + + return 0; +} + +static int +nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo, + struct ttm_mem_reg *mem, struct nouveau_vma *vma) +{ + struct nouveau_mem *node = mem->mm_node; + int ret; + + ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages << + PAGE_SHIFT, node->page_shift, + NV_MEM_ACCESS_RW, vma); + if (ret) + return ret; + + if (mem->mem_type == TTM_PL_VRAM) + nouveau_vm_map(vma, node); + else + nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node); + + return 0; +} + +static int +nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_channel *chan = chan = drm->channel; + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct ttm_mem_reg *old_mem = &bo->mem; + int ret; + + mutex_lock(&chan->cli->mutex); + + /* create temporary vmas for the transfer and attach them to the + * old nouveau_mem node, these will get cleaned up after ttm has + * destroyed the ttm_mem_reg + */ + if (nv_device(drm->device)->card_type >= NV_50) { + struct nouveau_mem *node = old_mem->mm_node; + + ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); + if (ret) + goto out; + + ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]); + if (ret) + goto out; + } + + ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); + if (ret == 0) { + ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, + no_wait_gpu, new_mem); + } + +out: + mutex_unlock(&chan->cli->mutex); + return ret; +} + +void +nouveau_bo_move_init(struct nouveau_drm *drm) +{ + static const struct { + const char *name; + int engine; + u32 oclass; + int (*exec)(struct nouveau_channel *, + struct ttm_buffer_object *, + struct ttm_mem_reg *, struct ttm_mem_reg *); + int (*init)(struct nouveau_channel *, u32 handle); + } _methods[] = { + { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, + { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, + { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, + { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, + { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init }, + { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init }, + { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, + { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, + {}, + { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, + }, *mthd = _methods; + const char *name = "CPU"; + int ret; + + do { + struct nouveau_object *object; + struct nouveau_channel *chan; + u32 handle = (mthd->engine << 16) | mthd->oclass; + + if (mthd->init == nve0_bo_move_init) + chan = drm->cechan; + else + chan = drm->channel; + if (chan == NULL) + continue; + + ret = nouveau_object_new(nv_object(drm), chan->handle, 
handle, + mthd->oclass, NULL, 0, &object); + if (ret == 0) { + ret = mthd->init(chan, handle); + if (ret) { + nouveau_object_del(nv_object(drm), + chan->handle, handle); + continue; + } + + drm->ttm.move = mthd->exec; + name = mthd->name; + break; + } + } while ((++mthd)->exec); + + NV_INFO(drm, "MM: using %s for buffer copies\n", name); +} + +static int +nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_placement placement; + struct ttm_mem_reg tmp_mem; + int ret; + + placement.fpfn = placement.lpfn = 0; + placement.num_placement = placement.num_busy_placement = 1; + placement.placement = placement.busy_placement = &placement_memtype; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); + if (ret) + return ret; + + ret = ttm_tt_bind(bo->ttm, &tmp_mem); + if (ret) + goto out; + + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); + if (ret) + goto out; + + ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); +out: + ttm_bo_mem_put(bo, &tmp_mem); + return ret; +} + +static int +nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_placement placement; + struct ttm_mem_reg tmp_mem; + int ret; + + placement.fpfn = placement.lpfn = 0; + placement.num_placement = placement.num_busy_placement = 1; + placement.placement = placement.busy_placement = &placement_memtype; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); + if (ret) + return ret; + + ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + if (ret) + goto out; + + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); + if (ret) + goto out; + +out: + ttm_bo_mem_put(bo, &tmp_mem); + return ret; +} + +static void +nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) +{ + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_vma *vma; + + /* ttm can now (stupidly) pass the driver bos it didn't create... 
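+	 * so bail out unless the destructor identifies the bo as one of ours.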
*/ + if (bo->destroy != nouveau_bo_del_ttm) + return; + + list_for_each_entry(vma, &nvbo->vma_list, head) { + if (new_mem && new_mem->mem_type == TTM_PL_VRAM) { + nouveau_vm_map(vma, new_mem->mm_node); + } else + if (new_mem && new_mem->mem_type == TTM_PL_TT && + nvbo->page_shift == vma->vm->vmm->spg_shift) { + if (((struct nouveau_mem *)new_mem->mm_node)->sg) + nouveau_vm_map_sg_table(vma, 0, new_mem-> + num_pages << PAGE_SHIFT, + new_mem->mm_node); + else + nouveau_vm_map_sg(vma, 0, new_mem-> + num_pages << PAGE_SHIFT, + new_mem->mm_node); + } else { + nouveau_vm_unmap(vma); + } + } +} + +static int +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, + struct nouveau_drm_tile **new_tile) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct drm_device *dev = drm->dev; + struct nouveau_bo *nvbo = nouveau_bo(bo); + u64 offset = new_mem->start << PAGE_SHIFT; + + *new_tile = NULL; + if (new_mem->mem_type != TTM_PL_VRAM) + return 0; + + if (nv_device(drm->device)->card_type >= NV_10) { + *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, + nvbo->tile_mode, + nvbo->tile_flags); + } + + return 0; +} + +static void +nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, + struct nouveau_drm_tile *new_tile, + struct nouveau_drm_tile **old_tile) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct drm_device *dev = drm->dev; + + nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj); + *old_tile = new_tile; +} + +static int +nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct ttm_mem_reg *old_mem = &bo->mem; + struct nouveau_drm_tile *new_tile = NULL; + int ret = 0; + + if (nv_device(drm->device)->card_type < NV_50) { + ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); + if (ret) + return ret; + } + + /* Fake bo copy. */ + if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { + BUG_ON(bo->mem.mm_node != NULL); + bo->mem = *new_mem; + new_mem->mm_node = NULL; + goto out; + } + + /* CPU copy if we have no accelerated method available */ + if (!drm->ttm.move) { + ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + goto out; + } + + /* Hardware assisted copy. */ + if (new_mem->mem_type == TTM_PL_SYSTEM) + ret = nouveau_bo_move_flipd(bo, evict, intr, + no_wait_gpu, new_mem); + else if (old_mem->mem_type == TTM_PL_SYSTEM) + ret = nouveau_bo_move_flips(bo, evict, intr, + no_wait_gpu, new_mem); + else + ret = nouveau_bo_move_m2mf(bo, evict, intr, + no_wait_gpu, new_mem); + + if (!ret) + goto out; + + /* Fallback to software copy. 
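+	 * (ttm_bo_move_memcpy() does the transfer with the CPU instead.)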
*/ + ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + +out: + if (nv_device(drm->device)->card_type < NV_50) { + if (ret) + nouveau_bo_vm_cleanup(bo, NULL, &new_tile); + else + nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); + } + + return ret; +} + +static int +nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) +{ + return 0; +} + +static int +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct drm_device *dev = drm->dev; + int ret; + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* System memory */ + return 0; + case TTM_PL_TT: +#if __OS_HAS_AGP + if (drm->agp.stat == ENABLED) { + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = drm->agp.base; + mem->bus.is_iomem = !dev->agp->cant_use_aperture; + } +#endif + break; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(dev->pdev, 1); + mem->bus.is_iomem = true; + if (nv_device(drm->device)->card_type >= NV_50) { + struct nouveau_bar *bar = nouveau_bar(drm->device); + struct nouveau_mem *node = mem->mm_node; + + ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, + &node->bar_vma); + if (ret) + return ret; + + mem->bus.offset = node->bar_vma.offset; + } + break; + default: + return -EINVAL; + } + return 0; +} + +static void +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +{ + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct nouveau_bar *bar = nouveau_bar(drm->device); + struct nouveau_mem *node = mem->mm_node; + + if (!node->bar_vma.node) + return; + + bar->unmap(bar, &node->bar_vma); +} + +static int +nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_device *device = nv_device(drm->device); + u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT; + + /* as long as the bo isn't in vram, and isn't tiled, we've got + * nothing to do here. 
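+	 * Otherwise, if it lies beyond the CPU-mappable window, it gets clamped back into it and revalidated.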
+ */ + if (bo->mem.mem_type != TTM_PL_VRAM) { + if (nv_device(drm->device)->card_type < NV_50 || + !nouveau_bo_tile_layout(nvbo)) + return 0; + } + + /* make sure bo is in mappable vram */ + if (bo->mem.start + bo->mem.num_pages < mappable) + return 0; + + + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = mappable; + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); + return nouveau_bo_validate(nvbo, false, false); +} + +static int +nouveau_ttm_tt_populate(struct ttm_tt *ttm) +{ + struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct nouveau_drm *drm; + struct drm_device *dev; + unsigned i; + int r; + bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + + if (ttm->state != tt_unpopulated) + return 0; + + if (slave && ttm->sg) { + /* make userspace faulting work */ + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, + ttm_dma->dma_address, ttm->num_pages); + ttm->state = tt_unbound; + return 0; + } + + drm = nouveau_bdev(ttm->bdev); + dev = drm->dev; + +#if __OS_HAS_AGP + if (drm->agp.stat == ENABLED) { + return ttm_agp_tt_populate(ttm); + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + return ttm_dma_populate((void *)ttm, dev->dev); + } +#endif +#endif + + r = ttm_pool_populate(ttm); + if (r) { + return r; + } + + for (i = 0; i < ttm->num_pages; i++) { + ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i], + 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) { + while (--i) { + pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + ttm_dma->dma_address[i] = 0; + } + ttm_pool_unpopulate(ttm); + return -EFAULT; + } + } + return 0; +} + +static void +nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) +{ + struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct nouveau_drm *drm; + struct drm_device *dev; + unsigned i; + bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + + if (slave) + return; + + drm = nouveau_bdev(ttm->bdev); + dev = drm->dev; + +#if __OS_HAS_AGP + if (drm->agp.stat == ENABLED) { + ttm_agp_tt_unpopulate(ttm); + return; + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + ttm_dma_unpopulate((void *)ttm, dev->dev); + return; + } +#endif +#endif + + for (i = 0; i < ttm->num_pages; i++) { + if (ttm_dma->dma_address[i]) { + pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + } + } + + ttm_pool_unpopulate(ttm); +} + +void +nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) +{ + struct nouveau_fence *old_fence = NULL; + + if (likely(fence)) + nouveau_fence_ref(fence); + + spin_lock(&nvbo->bo.bdev->fence_lock); + old_fence = nvbo->bo.sync_obj; + nvbo->bo.sync_obj = fence; + spin_unlock(&nvbo->bo.bdev->fence_lock); + + nouveau_fence_unref(&old_fence); +} + +static void +nouveau_bo_fence_unref(void **sync_obj) +{ + nouveau_fence_unref((struct nouveau_fence **)sync_obj); +} + +static void * +nouveau_bo_fence_ref(void *sync_obj) +{ + return nouveau_fence_ref(sync_obj); +} + +static bool +nouveau_bo_fence_signalled(void *sync_obj) +{ + return nouveau_fence_done(sync_obj); +} + +static int +nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr) +{ + return nouveau_fence_wait(sync_obj, lazy, intr); +} + +static int +nouveau_bo_fence_flush(void *sync_obj) +{ + return 0; +} + +struct ttm_bo_driver nouveau_bo_driver = { + .ttm_tt_create = &nouveau_ttm_tt_create, + .ttm_tt_populate = 
&nouveau_ttm_tt_populate, + .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, + .invalidate_caches = nouveau_bo_invalidate_caches, + .init_mem_type = nouveau_bo_init_mem_type, + .evict_flags = nouveau_bo_evict_flags, + .move_notify = nouveau_bo_move_ntfy, + .move = nouveau_bo_move, + .verify_access = nouveau_bo_verify_access, + .sync_obj_signaled = nouveau_bo_fence_signalled, + .sync_obj_wait = nouveau_bo_fence_wait, + .sync_obj_flush = nouveau_bo_fence_flush, + .sync_obj_unref = nouveau_bo_fence_unref, + .sync_obj_ref = nouveau_bo_fence_ref, + .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, + .io_mem_reserve = &nouveau_ttm_io_mem_reserve, + .io_mem_free = &nouveau_ttm_io_mem_free, +}; + +struct nouveau_vma * +nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm) +{ + struct nouveau_vma *vma; + list_for_each_entry(vma, &nvbo->vma_list, head) { + if (vma->vm == vm) + return vma; + } + + return NULL; +} + +int +nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, + struct nouveau_vma *vma) +{ + const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; + struct nouveau_mem *node = nvbo->bo.mem.mm_node; + int ret; + + ret = nouveau_vm_get(vm, size, nvbo->page_shift, + NV_MEM_ACCESS_RW, vma); + if (ret) + return ret; + + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) + nouveau_vm_map(vma, nvbo->bo.mem.mm_node); + else if (nvbo->bo.mem.mem_type == TTM_PL_TT) { + if (node->sg) + nouveau_vm_map_sg_table(vma, 0, size, node); + else + nouveau_vm_map_sg(vma, 0, size, node); + } + + list_add_tail(&vma->head, &nvbo->vma_list); + vma->refcount = 1; + return 0; +} + +void +nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma) +{ + if (vma->node) { + if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) { + spin_lock(&nvbo->bo.bdev->fence_lock); + ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.bdev->fence_lock); + nouveau_vm_unmap(vma); + } + + nouveau_vm_put(vma); + list_del(&vma->head); + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h new file mode 100644 index 0000000..653dbbb --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -0,0 +1,100 @@ +#ifndef __NOUVEAU_BO_H__ +#define __NOUVEAU_BO_H__ + +struct nouveau_channel; +struct nouveau_fence; +struct nouveau_vma; + +struct nouveau_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + u32 valid_domains; + u32 placements[3]; + u32 busy_placements[3]; + struct ttm_bo_kmap_obj kmap; + struct list_head head; + + /* protected by ttm_bo_reserve() */ + struct drm_file *reserved_by; + struct list_head entry; + int pbbo_index; + bool validate_mapped; + + struct list_head vma_list; + unsigned page_shift; + + u32 tile_mode; + u32 tile_flags; + struct nouveau_drm_tile *tile; + + struct drm_gem_object *gem; + + /* protect by the ttm reservation lock */ + int pin_refcnt; + + struct ttm_bo_kmap_obj dma_buf_vmap; +}; + +static inline struct nouveau_bo * +nouveau_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct nouveau_bo, bo); +} + +static inline int +nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) +{ + struct nouveau_bo *prev; + + if (!pnvbo) + return -EINVAL; + prev = *pnvbo; + + *pnvbo = ref ? 
nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL; + if (prev) { + struct ttm_buffer_object *bo = &prev->bo; + + ttm_bo_unref(&bo); + } + + return 0; +} + +extern struct ttm_bo_driver nouveau_bo_driver; + +void nouveau_bo_move_init(struct nouveau_drm *); +int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags, + u32 tile_mode, u32 tile_flags, struct sg_table *sg, + struct nouveau_bo **); +int nouveau_bo_pin(struct nouveau_bo *, u32 flags); +int nouveau_bo_unpin(struct nouveau_bo *); +int nouveau_bo_map(struct nouveau_bo *); +void nouveau_bo_unmap(struct nouveau_bo *); +void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy); +u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index); +void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val); +u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index); +void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); +void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); +int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, + bool no_wait_gpu); + +struct nouveau_vma * +nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); + +int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *, + struct nouveau_vma *); +void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *); + +/* TODO: submit equivalent to TTM generic API upstream? */ +static inline void __iomem * +nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo) +{ + bool is_iomem; + void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual( + &nvbo->kmap, &is_iomem); + WARN_ON_ONCE(ioptr && !is_iomem); + return ioptr; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c new file mode 100644 index 0000000..eaa80a2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -0,0 +1,403 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/client.h> +#include <core/device.h> +#include <core/class.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/instmem.h> + +#include <engine/software.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_bo.h" +#include "nouveau_chan.h" +#include "nouveau_fence.h" +#include "nouveau_abi16.h" + +MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM"); +static int nouveau_vram_pushbuf; +module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); + +int +nouveau_channel_idle(struct nouveau_channel *chan) +{ + struct nouveau_cli *cli = chan->cli; + struct nouveau_fence *fence = NULL; + int ret; + + ret = nouveau_fence_new(chan, false, &fence); + if (!ret) { + ret = nouveau_fence_wait(fence, false, false); + nouveau_fence_unref(&fence); + } + + if (ret) + NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n", + chan->handle, cli->base.name); + return ret; +} + +void +nouveau_channel_del(struct nouveau_channel **pchan) +{ + struct nouveau_channel *chan = *pchan; + if (chan) { + struct nouveau_object *client = nv_object(chan->cli); + if (chan->fence) { + nouveau_channel_idle(chan); + nouveau_fence(chan->drm)->context_del(chan); + } + nouveau_object_del(client, NVDRM_DEVICE, chan->handle); + nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); + nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); + nouveau_bo_unmap(chan->push.buffer); + if (chan->push.buffer && chan->push.buffer->pin_refcnt) + nouveau_bo_unpin(chan->push.buffer); + nouveau_bo_ref(NULL, &chan->push.buffer); + kfree(chan); + } + *pchan = NULL; +} + +static int +nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, + u32 parent, u32 handle, u32 size, + struct nouveau_channel **pchan) +{ + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_instmem *imem = nouveau_instmem(device); + struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nouveau_client *client = &cli->base; + struct nv_dma_class args = {}; + struct nouveau_channel *chan; + struct nouveau_object *push; + u32 target; + int ret; + + chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + + chan->cli = cli; + chan->drm = drm; + chan->handle = handle; + + /* allocate memory for dma push buffer */ + target = TTM_PL_FLAG_TT; + if (nouveau_vram_pushbuf) + target = TTM_PL_FLAG_VRAM; + + ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, + &chan->push.buffer); + if (ret == 0) { + ret = nouveau_bo_pin(chan->push.buffer, target); + if (ret == 0) + ret = nouveau_bo_map(chan->push.buffer); + } + + if (ret) { + nouveau_channel_del(pchan); + return ret; + } + + /* create dma object covering the *entire* memory space that the + * pushbuf lives in, this is because the GEM code requires that + * we be able to call out to other (indirect) push buffers + */ + chan->push.vma.offset = chan->push.buffer->bo.offset; + chan->push.handle = NVDRM_PUSH | (handle & 0xffff); + + if (device->card_type >= NV_50) { + ret = nouveau_bo_vma_add(chan->push.buffer, client->vm, + &chan->push.vma); + if (ret) { + nouveau_channel_del(pchan); + return ret; + } + + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + args.start = 0; + args.limit = client->vm->vmm->limit - 1; + } else + if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { + u64 limit = pfb->ram.size - imem->reserved - 1; + if (device->card_type == NV_04) { + /* nv04 vram 
pushbuf hack, retarget to its location in + * the framebuffer bar rather than direct vram access.. + * nfi why this exists, it came from the -nv ddx. + */ + args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR; + args.start = pci_resource_start(device->pdev, 1); + args.limit = args.start + limit; + } else { + args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; + args.start = 0; + args.limit = limit; + } + } else { + if (chan->drm->agp.stat == ENABLED) { + args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; + args.start = chan->drm->agp.base; + args.limit = chan->drm->agp.base + + chan->drm->agp.size - 1; + } else { + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; + args.start = 0; + args.limit = vmm->limit - 1; + } + } + + ret = nouveau_object_new(nv_object(chan->cli), parent, + chan->push.handle, 0x0002, + &args, sizeof(args), &push); + if (ret) { + nouveau_channel_del(pchan); + return ret; + } + + return 0; +} + +static int +nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli, + u32 parent, u32 handle, u32 engine, + struct nouveau_channel **pchan) +{ + static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS, + NVC0_CHANNEL_IND_CLASS, + NV84_CHANNEL_IND_CLASS, + NV50_CHANNEL_IND_CLASS, + 0 }; + const u16 *oclass = oclasses; + struct nve0_channel_ind_class args; + struct nouveau_channel *chan; + int ret; + + /* allocate dma push buffer */ + ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan); + *pchan = chan; + if (ret) + return ret; + + /* create channel object */ + args.pushbuf = chan->push.handle; + args.ioffset = 0x10000 + chan->push.vma.offset; + args.ilength = 0x02000; + args.engine = engine; + + do { + ret = nouveau_object_new(nv_object(cli), parent, handle, + *oclass++, &args, sizeof(args), + &chan->object); + if (ret == 0) + return ret; + } while (*oclass); + + nouveau_channel_del(pchan); + return ret; +} + +static int +nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli, + u32 parent, u32 handle, struct nouveau_channel **pchan) +{ + static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS, + NV17_CHANNEL_DMA_CLASS, + NV10_CHANNEL_DMA_CLASS, + NV03_CHANNEL_DMA_CLASS, + 0 }; + const u16 *oclass = oclasses; + struct nv03_channel_dma_class args; + struct nouveau_channel *chan; + int ret; + + /* allocate dma push buffer */ + ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan); + *pchan = chan; + if (ret) + return ret; + + /* create channel object */ + args.pushbuf = chan->push.handle; + args.offset = chan->push.vma.offset; + + do { + ret = nouveau_object_new(nv_object(cli), parent, handle, + *oclass++, &args, sizeof(args), + &chan->object); + if (ret == 0) + return ret; + } while (ret && *oclass); + + nouveau_channel_del(pchan); + return ret; +} + +static int +nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) +{ + struct nouveau_client *client = nv_client(chan->cli); + struct nouveau_device *device = nv_device(chan->drm->device); + struct nouveau_instmem *imem = nouveau_instmem(device); + struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nouveau_software_chan *swch; + struct nouveau_object *object; + struct nv_dma_class args = {}; + int ret, i; + + /* allocate dma objects to cover all allowed vram, and gart */ + if (device->card_type < NV_C0) { + if (device->card_type >= NV_50) { + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + args.start = 0; + args.limit = client->vm->vmm->limit - 1; + } else { + args.flags = NV_DMA_TARGET_VRAM | 
NV_DMA_ACCESS_RDWR; + args.start = 0; + args.limit = pfb->ram.size - imem->reserved - 1; + } + + ret = nouveau_object_new(nv_object(client), chan->handle, vram, + 0x003d, &args, sizeof(args), &object); + if (ret) + return ret; + + if (device->card_type >= NV_50) { + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + args.start = 0; + args.limit = client->vm->vmm->limit - 1; + } else + if (chan->drm->agp.stat == ENABLED) { + args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; + args.start = chan->drm->agp.base; + args.limit = chan->drm->agp.base + + chan->drm->agp.size - 1; + } else { + args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; + args.start = 0; + args.limit = vmm->limit - 1; + } + + ret = nouveau_object_new(nv_object(client), chan->handle, gart, + 0x003d, &args, sizeof(args), &object); + if (ret) + return ret; + + chan->vram = vram; + chan->gart = gart; + } + + /* initialise dma tracking parameters */ + switch (nv_hclass(chan->object) & 0x00ff) { + case 0x006b: + case 0x006e: + chan->user_put = 0x40; + chan->user_get = 0x44; + chan->dma.max = (0x10000 / 4) - 2; + break; + default: + chan->user_put = 0x40; + chan->user_get = 0x44; + chan->user_get_hi = 0x60; + chan->dma.ib_base = 0x10000 / 4; + chan->dma.ib_max = (0x02000 / 8) - 1; + chan->dma.ib_put = 0; + chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; + chan->dma.max = chan->dma.ib_base; + break; + } + + chan->dma.put = 0; + chan->dma.cur = chan->dma.put; + chan->dma.free = chan->dma.max - chan->dma.cur; + + ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(chan, 0x00000000); + + /* allocate software object class (used for fences on <= nv05, and + * to signal flip completion), bind it to a subchannel. + */ + if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) { + ret = nouveau_object_new(nv_object(client), chan->handle, + NvSw, nouveau_abi16_swclass(chan->drm), + NULL, 0, &object); + if (ret) + return ret; + + swch = (void *)object->parent; + swch->flip = nouveau_flip_complete; + swch->flip_data = chan; + } + + if (device->card_type < NV_C0) { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubSw, 0x0000, 1); + OUT_RING (chan, NvSw); + FIRE_RING (chan); + } + + /* initialise synchronisation */ + return nouveau_fence(chan->drm)->context_new(chan); +} + +int +nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli, + u32 parent, u32 handle, u32 arg0, u32 arg1, + struct nouveau_channel **pchan) +{ + int ret; + + ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan); + if (ret) { + NV_DEBUG(cli, "ib channel create, %d\n", ret); + ret = nouveau_channel_dma(drm, cli, parent, handle, pchan); + if (ret) { + NV_DEBUG(cli, "dma channel create, %d\n", ret); + return ret; + } + } + + ret = nouveau_channel_init(*pchan, arg0, arg1); + if (ret) { + NV_ERROR(cli, "channel failed to initialise, %d\n", ret); + nouveau_channel_del(pchan); + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h new file mode 100644 index 0000000..40f97e2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h @@ -0,0 +1,47 @@ +#ifndef __NOUVEAU_CHAN_H__ +#define __NOUVEAU_CHAN_H__ + +struct nouveau_cli; + +struct nouveau_channel { + struct nouveau_cli *cli; + struct nouveau_drm *drm; + + u32 handle; + u32 vram; + u32 gart; + + struct { + struct nouveau_bo *buffer; + struct nouveau_vma vma; + u32 handle; + } push; + + /* TODO: this will be 
reworked in the near future */ + bool accel_done; + void *fence; + struct { + int max; + int free; + int cur; + int put; + int ib_base; + int ib_max; + int ib_free; + int ib_put; + } dma; + u32 user_get_hi; + u32 user_get; + u32 user_put; + + struct nouveau_object *object; +}; + + +int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *, + u32 parent, u32 handle, u32 arg0, u32 arg1, + struct nouveau_channel **); +void nouveau_channel_del(struct nouveau_channel **); +int nouveau_channel_idle(struct nouveau_channel *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c new file mode 100644 index 0000000..4da776f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -0,0 +1,1150 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <acpi/button.h> + +#include <drm/drmP.h> +#include <drm/drm_edid.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_reg.h" +#include "nouveau_drm.h" +#include "dispnv04/hw.h" +#include "nouveau_acpi.h" + +#include "nouveau_display.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" + +#include <subdev/i2c.h> +#include <subdev/gpio.h> + +MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); +static int nouveau_tv_disable = 0; +module_param_named(tv_disable, nouveau_tv_disable, int, 0400); + +MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); +static int nouveau_ignorelid = 0; +module_param_named(ignorelid, nouveau_ignorelid, int, 0400); + +MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)"); +static int nouveau_duallink = 1; +module_param_named(duallink, nouveau_duallink, int, 0400); + +struct nouveau_encoder * +find_encoder(struct drm_connector *connector, int type) +{ + struct drm_device *dev = connector->dev; + struct nouveau_encoder *nv_encoder; + struct drm_mode_object *obj; + int i, id; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + id = connector->encoder_ids[i]; + if (!id) + break; + + obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + + if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type) + return nv_encoder; + } + + return NULL; +} + +struct nouveau_connector * +nouveau_encoder_connector_get(struct nouveau_encoder *encoder) +{ + struct drm_device *dev = to_drm_encoder(encoder)->dev; + struct drm_connector *drm_connector; + + list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) { + if (drm_connector->encoder == to_drm_encoder(encoder)) + return nouveau_connector(drm_connector); + } + + return NULL; +} + +static void +nouveau_connector_destroy(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + kfree(nv_connector->edid); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static struct nouveau_i2c_port * +nouveau_connector_ddc_detect(struct drm_connector *connector, + struct nouveau_encoder **pnv_encoder) +{ + struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nouveau_i2c_port *port = NULL; + int i, panel = -ENODEV; + + /* eDP panels need powering on by us (if the VBIOS doesn't default it + * to on) before doing any AUX channel transactions. LVDS panel power + * is handled by the SOR itself, and not required for LVDS DDC. 
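+ * (Note, added for clarity: if the panel is powered up here and no
+ * sink responds, the GPIO is restored to its previous state further
+ * down in this function.)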
+ */ + if (nv_connector->type == DCB_CONNECTOR_eDP) { + panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); + if (panel == 0) { + gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); + msleep(300); + } + } + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + struct nouveau_encoder *nv_encoder; + struct drm_mode_object *obj; + int id; + + id = connector->encoder_ids[i]; + if (!id) + break; + + obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + + port = nv_encoder->i2c; + if (port && nv_probe_i2c(port, 0x50)) { + *pnv_encoder = nv_encoder; + break; + } + + port = NULL; + } + + /* eDP panel not detected, restore panel power GPIO to previous + * state to avoid confusing the SOR for other output types. + */ + if (!port && panel == 0) + gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); + + return port; +} + +static struct nouveau_encoder * +nouveau_connector_of_detect(struct drm_connector *connector) +{ +#ifdef __powerpc__ + struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder; + struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); + + if (!dn || + !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) || + (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG)))) + return NULL; + + for_each_child_of_node(dn, cn) { + const char *name = of_get_property(cn, "name", NULL); + const void *edid = of_get_property(cn, "EDID", NULL); + int idx = name ? name[strlen(name) - 1] - 'A' : 0; + + if (nv_encoder->dcb->i2c_index == idx && edid) { + nv_connector->edid = + kmemdup(edid, EDID_LENGTH, GFP_KERNEL); + of_node_put(cn); + return nv_encoder; + } + } +#endif + return NULL; +} + +static void +nouveau_connector_set_encoder(struct drm_connector *connector, + struct nouveau_encoder *nv_encoder) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct drm_device *dev = connector->dev; + + if (nv_connector->detected_encoder == nv_encoder) + return; + nv_connector->detected_encoder = nv_encoder; + + if (nv_device(drm->device)->card_type >= NV_50) { + connector->interlace_allowed = true; + connector->doublescan_allowed = true; + } else + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS || + nv_encoder->dcb->type == DCB_OUTPUT_TMDS) { + connector->doublescan_allowed = false; + connector->interlace_allowed = false; + } else { + connector->doublescan_allowed = true; + if (nv_device(drm->device)->card_type == NV_20 || + (nv_device(drm->device)->card_type == NV_10 && + (dev->pci_device & 0x0ff0) != 0x0100 && + (dev->pci_device & 0x0ff0) != 0x0150)) + /* HW is broken */ + connector->interlace_allowed = false; + else + connector->interlace_allowed = true; + } + + if (nv_connector->type == DCB_CONNECTOR_DVI_I) { + drm_object_property_set_value(&connector->base, + dev->mode_config.dvi_i_subconnector_property, + nv_encoder->dcb->type == DCB_OUTPUT_TMDS ? 
+ DRM_MODE_SUBCONNECTOR_DVID : + DRM_MODE_SUBCONNECTOR_DVIA); + } +} + +static enum drm_connector_status +nouveau_connector_detect(struct drm_connector *connector, bool force) +{ + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = NULL; + struct nouveau_encoder *nv_partner; + struct nouveau_i2c_port *i2c; + int type; + + /* Cleanup the previous EDID block. */ + if (nv_connector->edid) { + drm_mode_connector_update_edid_property(connector, NULL); + kfree(nv_connector->edid); + nv_connector->edid = NULL; + } + + i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); + if (i2c) { + nv_connector->edid = drm_get_edid(connector, &i2c->adapter); + drm_mode_connector_update_edid_property(connector, + nv_connector->edid); + if (!nv_connector->edid) { + NV_ERROR(drm, "DDC responded, but no EDID for %s\n", + drm_get_connector_name(connector)); + goto detect_analog; + } + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP && + !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { + NV_ERROR(drm, "Detected %s, but failed init\n", + drm_get_connector_name(connector)); + return connector_status_disconnected; + } + + /* Override encoder type for DVI-I based on whether EDID + * says the display is digital or analog, both use the + * same i2c channel so the value returned from ddc_detect + * isn't necessarily correct. + */ + nv_partner = NULL; + if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) + nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG); + if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG) + nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS); + + if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG && + nv_partner->dcb->type == DCB_OUTPUT_TMDS) || + (nv_encoder->dcb->type == DCB_OUTPUT_TMDS && + nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) { + if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) + type = DCB_OUTPUT_TMDS; + else + type = DCB_OUTPUT_ANALOG; + + nv_encoder = find_encoder(connector, type); + } + + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + + nv_encoder = nouveau_connector_of_detect(connector); + if (nv_encoder) { + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + +detect_analog: + nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG); + if (!nv_encoder && !nouveau_tv_disable) + nv_encoder = find_encoder(connector, DCB_OUTPUT_TV); + if (nv_encoder && force) { + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + struct drm_encoder_helper_funcs *helper = + encoder->helper_private; + + if (helper->detect(encoder, connector) == + connector_status_connected) { + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + + } + + return connector_status_disconnected; +} + +static enum drm_connector_status +nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) +{ + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = NULL; + enum drm_connector_status status = connector_status_disconnected; + + /* Cleanup the previous EDID block. 
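+ * The detection paths below may repopulate it from DDC, ACPI or the
+ * VBIOS-embedded EDID.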
*/ + if (nv_connector->edid) { + drm_mode_connector_update_edid_property(connector, NULL); + kfree(nv_connector->edid); + nv_connector->edid = NULL; + } + + nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS); + if (!nv_encoder) + return connector_status_disconnected; + + /* Try retrieving EDID via DDC */ + if (!drm->vbios.fp_no_ddc) { + status = nouveau_connector_detect(connector, force); + if (status == connector_status_connected) + goto out; + } + + /* On some laptops (Sony, i'm looking at you) there appears to + * be no direct way of accessing the panel's EDID. The only + * option available to us appears to be to ask ACPI for help.. + * + * It's important this check's before trying straps, one of the + * said manufacturer's laptops are configured in such a way + * the nouveau decides an entry in the VBIOS FP mode table is + * valid - it's not (rh#613284) + */ + if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { + if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) { + status = connector_status_connected; + goto out; + } + } + + /* If no EDID found above, and the VBIOS indicates a hardcoded + * modeline is avalilable for the panel, set it as the panel's + * native mode and exit. + */ + if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc || + nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { + status = connector_status_connected; + goto out; + } + + /* Still nothing, some VBIOS images have a hardcoded EDID block + * stored for the panel stored in them. + */ + if (!drm->vbios.fp_no_ddc) { + struct edid *edid = + (struct edid *)nouveau_bios_embedded_edid(dev); + if (edid) { + nv_connector->edid = + kmemdup(edid, EDID_LENGTH, GFP_KERNEL); + if (nv_connector->edid) + status = connector_status_connected; + } + } + +out: +#if defined(CONFIG_ACPI_BUTTON) || \ + (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) + if (status == connector_status_connected && + !nouveau_ignorelid && !acpi_lid_open()) + status = connector_status_unknown; +#endif + + drm_mode_connector_update_edid_property(connector, nv_connector->edid); + nouveau_connector_set_encoder(connector, nv_encoder); + return status; +} + +static void +nouveau_connector_force(struct drm_connector *connector) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder; + int type; + + if (nv_connector->type == DCB_CONNECTOR_DVI_I) { + if (connector->force == DRM_FORCE_ON_DIGITAL) + type = DCB_OUTPUT_TMDS; + else + type = DCB_OUTPUT_ANALOG; + } else + type = DCB_OUTPUT_ANY; + + nv_encoder = find_encoder(connector, type); + if (!nv_encoder) { + NV_ERROR(drm, "can't find encoder to force %s on!\n", + drm_get_connector_name(connector)); + connector->status = connector_status_disconnected; + return; + } + + nouveau_connector_set_encoder(connector, nv_encoder); +} + +static int +nouveau_connector_set_property(struct drm_connector *connector, + struct drm_property *property, uint64_t value) +{ + struct nouveau_display *disp = nouveau_display(connector->dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + struct drm_device *dev = connector->dev; + struct nouveau_crtc *nv_crtc; + int ret; + + nv_crtc = NULL; + if (connector->encoder && connector->encoder->crtc) + nv_crtc = nouveau_crtc(connector->encoder->crtc); + + /* Scaling mode */ + if (property == 
dev->mode_config.scaling_mode_property) { + bool modeset = false; + + switch (value) { + case DRM_MODE_SCALE_NONE: + case DRM_MODE_SCALE_FULLSCREEN: + case DRM_MODE_SCALE_CENTER: + case DRM_MODE_SCALE_ASPECT: + break; + default: + return -EINVAL; + } + + /* LVDS always needs gpu scaling */ + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && + value == DRM_MODE_SCALE_NONE) + return -EINVAL; + + /* Changing between GPU and panel scaling requires a full + * modeset + */ + if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) || + (value == DRM_MODE_SCALE_NONE)) + modeset = true; + nv_connector->scaling_mode = value; + + if (!nv_crtc) + return 0; + + if (modeset || !nv_crtc->set_scale) { + ret = drm_crtc_helper_set_mode(&nv_crtc->base, + &nv_crtc->base.mode, + nv_crtc->base.x, + nv_crtc->base.y, NULL); + if (!ret) + return -EINVAL; + } else { + ret = nv_crtc->set_scale(nv_crtc, true); + if (ret) + return ret; + } + + return 0; + } + + /* Underscan */ + if (property == disp->underscan_property) { + if (nv_connector->underscan != value) { + nv_connector->underscan = value; + if (!nv_crtc || !nv_crtc->set_scale) + return 0; + + return nv_crtc->set_scale(nv_crtc, true); + } + + return 0; + } + + if (property == disp->underscan_hborder_property) { + if (nv_connector->underscan_hborder != value) { + nv_connector->underscan_hborder = value; + if (!nv_crtc || !nv_crtc->set_scale) + return 0; + + return nv_crtc->set_scale(nv_crtc, true); + } + + return 0; + } + + if (property == disp->underscan_vborder_property) { + if (nv_connector->underscan_vborder != value) { + nv_connector->underscan_vborder = value; + if (!nv_crtc || !nv_crtc->set_scale) + return 0; + + return nv_crtc->set_scale(nv_crtc, true); + } + + return 0; + } + + /* Dithering */ + if (property == disp->dithering_mode) { + nv_connector->dithering_mode = value; + if (!nv_crtc || !nv_crtc->set_dither) + return 0; + + return nv_crtc->set_dither(nv_crtc, true); + } + + if (property == disp->dithering_depth) { + nv_connector->dithering_depth = value; + if (!nv_crtc || !nv_crtc->set_dither) + return 0; + + return nv_crtc->set_dither(nv_crtc, true); + } + + if (nv_crtc && nv_crtc->set_color_vibrance) { + /* Hue */ + if (property == disp->vibrant_hue_property) { + nv_crtc->vibrant_hue = value - 90; + return nv_crtc->set_color_vibrance(nv_crtc, true); + } + /* Saturation */ + if (property == disp->color_vibrance_property) { + nv_crtc->color_vibrance = value - 100; + return nv_crtc->set_color_vibrance(nv_crtc, true); + } + } + + if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV) + return get_slave_funcs(encoder)->set_property( + encoder, connector, property, value); + + return -EINVAL; +} + +static struct drm_display_mode * +nouveau_connector_native_mode(struct drm_connector *connector) +{ + struct drm_connector_helper_funcs *helper = connector->helper_private; + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *largest = NULL; + int high_w = 0, high_h = 0, high_v = 0; + + list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { + mode->vrefresh = drm_mode_vrefresh(mode); + if (helper->mode_valid(connector, mode) != MODE_OK || + (mode->flags & DRM_MODE_FLAG_INTERLACE)) + continue; + + /* Use preferred mode if there is one.. 
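+ * (A preferred EDID mode takes priority over the largest-resolution
+ * fallback computed below.)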
*/ + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + NV_DEBUG(drm, "native mode from preferred\n"); + return drm_mode_duplicate(dev, mode); + } + + /* Otherwise, take the resolution with the largest width, then + * height, then vertical refresh + */ + if (mode->hdisplay < high_w) + continue; + + if (mode->hdisplay == high_w && mode->vdisplay < high_h) + continue; + + if (mode->hdisplay == high_w && mode->vdisplay == high_h && + mode->vrefresh < high_v) + continue; + + high_w = mode->hdisplay; + high_h = mode->vdisplay; + high_v = mode->vrefresh; + largest = mode; + } + + NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n", + high_w, high_h, high_v); + return largest ? drm_mode_duplicate(dev, largest) : NULL; +} + +struct moderec { + int hdisplay; + int vdisplay; +}; + +static struct moderec scaler_modes[] = { + { 1920, 1200 }, + { 1920, 1080 }, + { 1680, 1050 }, + { 1600, 1200 }, + { 1400, 1050 }, + { 1280, 1024 }, + { 1280, 960 }, + { 1152, 864 }, + { 1024, 768 }, + { 800, 600 }, + { 720, 400 }, + { 640, 480 }, + { 640, 400 }, + { 640, 350 }, + {} +}; + +static int +nouveau_connector_scaler_modes_add(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_display_mode *native = nv_connector->native_mode, *m; + struct drm_device *dev = connector->dev; + struct moderec *mode = &scaler_modes[0]; + int modes = 0; + + if (!native) + return 0; + + while (mode->hdisplay) { + if (mode->hdisplay <= native->hdisplay && + mode->vdisplay <= native->vdisplay) { + m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay, + drm_mode_vrefresh(native), false, + false, false); + if (!m) + continue; + + m->type |= DRM_MODE_TYPE_DRIVER; + + drm_mode_probed_add(connector, m); + modes++; + } + + mode++; + } + + return modes; +} + +static void +nouveau_connector_detect_depth(struct drm_connector *connector) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct nvbios *bios = &drm->vbios; + struct drm_display_mode *mode = nv_connector->native_mode; + bool duallink; + + /* if the edid is feeling nice enough to provide this info, use it */ + if (nv_connector->edid && connector->display_info.bpc) + return; + + /* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */ + if (nv_connector->type == DCB_CONNECTOR_eDP) { + connector->display_info.bpc = 6; + return; + } + + /* we're out of options unless we're LVDS, default to 8bpc */ + if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) { + connector->display_info.bpc = 8; + return; + } + + connector->display_info.bpc = 6; + + /* LVDS: panel straps */ + if (bios->fp_no_ddc) { + if (bios->fp.if_is_24bit) + connector->display_info.bpc = 8; + return; + } + + /* LVDS: DDC panel, need to first determine the number of links to + * know which if_is_24bit flag to check... 
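+ * (SPWG panels report the link count in EDID byte 121; for other LVDS
+ * panels it is inferred from the native mode's pixel clock.)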
+ */ + if (nv_connector->edid && + nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) + duallink = ((u8 *)nv_connector->edid)[121] == 2; + else + duallink = mode->clock >= bios->fp.duallink_transition_clk; + + if ((!duallink && (bios->fp.strapless_is_24bit & 1)) || + ( duallink && (bios->fp.strapless_is_24bit & 2))) + connector->display_info.bpc = 8; +} + +static int +nouveau_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + int ret = 0; + + /* destroy the native mode, the attached monitor could have changed. + */ + if (nv_connector->native_mode) { + drm_mode_destroy(dev, nv_connector->native_mode); + nv_connector->native_mode = NULL; + } + + if (nv_connector->edid) + ret = drm_add_edid_modes(connector, nv_connector->edid); + else + if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && + (nv_encoder->dcb->lvdsconf.use_straps_for_mode || + drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { + struct drm_display_mode mode; + + nouveau_bios_fp_mode(dev, &mode); + nv_connector->native_mode = drm_mode_duplicate(dev, &mode); + } + + /* Determine display colour depth for everything except LVDS now, + * DP requires this before mode_valid() is called. + */ + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + nouveau_connector_detect_depth(connector); + + /* Find the native mode if this is a digital panel, if we didn't + * find any modes through DDC previously add the native mode to + * the list of modes. + */ + if (!nv_connector->native_mode) + nv_connector->native_mode = + nouveau_connector_native_mode(connector); + if (ret == 0 && nv_connector->native_mode) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(dev, nv_connector->native_mode); + drm_mode_probed_add(connector, mode); + ret = 1; + } + + /* Determine LVDS colour depth, must happen after determining + * "native" mode as some VBIOS tables require us to use the + * pixel clock as part of the lookup... 
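+ * (nouveau_connector_detect_depth() compares native_mode->clock with
+ * the VBIOS dual-link transition clock for non-SPWG panels.)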
+ */ + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + nouveau_connector_detect_depth(connector); + + if (nv_encoder->dcb->type == DCB_OUTPUT_TV) + ret = get_slave_funcs(encoder)->get_modes(encoder, connector); + + if (nv_connector->type == DCB_CONNECTOR_LVDS || + nv_connector->type == DCB_CONNECTOR_LVDS_SPWG || + nv_connector->type == DCB_CONNECTOR_eDP) + ret += nouveau_connector_scaler_modes_add(connector); + + return ret; +} + +static unsigned +get_tmds_link_bandwidth(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct dcb_output *dcb = nv_connector->detected_encoder->dcb; + + if (dcb->location != DCB_LOC_ON_CHIP || + nv_device(drm->device)->chipset >= 0x46) + return 165000; + else if (nv_device(drm->device)->chipset >= 0x40) + return 155000; + else if (nv_device(drm->device)->chipset >= 0x18) + return 135000; + else + return 112000; +} + +static int +nouveau_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + unsigned min_clock = 25000, max_clock = min_clock; + unsigned clock = mode->clock; + + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_LVDS: + if (nv_connector->native_mode && + (mode->hdisplay > nv_connector->native_mode->hdisplay || + mode->vdisplay > nv_connector->native_mode->vdisplay)) + return MODE_PANEL; + + min_clock = 0; + max_clock = 400000; + break; + case DCB_OUTPUT_TMDS: + max_clock = get_tmds_link_bandwidth(connector); + if (nouveau_duallink && nv_encoder->dcb->duallink_possible) + max_clock *= 2; + break; + case DCB_OUTPUT_ANALOG: + max_clock = nv_encoder->dcb->crtconf.maxfreq; + if (!max_clock) + max_clock = 350000; + break; + case DCB_OUTPUT_TV: + return get_slave_funcs(encoder)->mode_valid(encoder, mode); + case DCB_OUTPUT_DP: + max_clock = nv_encoder->dp.link_nr; + max_clock *= nv_encoder->dp.link_bw; + clock = clock * (connector->display_info.bpc * 3) / 10; + break; + default: + BUG_ON(1); + return MODE_BAD; + } + + if (clock < min_clock) + return MODE_CLOCK_LOW; + + if (clock > max_clock) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static struct drm_encoder * +nouveau_connector_best_encoder(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + + if (nv_connector->detected_encoder) + return to_drm_encoder(nv_connector->detected_encoder); + + return NULL; +} + +static const struct drm_connector_helper_funcs +nouveau_connector_helper_funcs = { + .get_modes = nouveau_connector_get_modes, + .mode_valid = nouveau_connector_mode_valid, + .best_encoder = nouveau_connector_best_encoder, +}; + +static const struct drm_connector_funcs +nouveau_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .save = NULL, + .restore = NULL, + .detect = nouveau_connector_detect, + .destroy = nouveau_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = nouveau_connector_set_property, + .force = nouveau_connector_force +}; + +static const struct drm_connector_funcs +nouveau_connector_funcs_lvds = { + .dpms = drm_helper_connector_dpms, + .save = NULL, + .restore = NULL, + .detect = nouveau_connector_detect_lvds, + .destroy = nouveau_connector_destroy, + .fill_modes = 
drm_helper_probe_single_connector_modes, + .set_property = nouveau_connector_set_property, + .force = nouveau_connector_force +}; + +static void +nouveau_connector_hotplug_work(struct work_struct *work) +{ + struct nouveau_connector *nv_connector = + container_of(work, struct nouveau_connector, hpd_work); + struct drm_connector *connector = &nv_connector->base; + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff); + + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", + drm_get_connector_name(connector)); + + if (plugged) + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + else + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + + drm_helper_hpd_irq_event(dev); +} + +static int +nouveau_connector_hotplug(struct nouveau_eventh *event, int index) +{ + struct nouveau_connector *nv_connector = + container_of(event, struct nouveau_connector, hpd_func); + schedule_work(&nv_connector->hpd_work); + return NVKM_EVENT_KEEP; +} + +static int +drm_conntype_from_dcb(enum dcb_connector_type dcb) +{ + switch (dcb) { + case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA; + case DCB_CONNECTOR_TV_0 : + case DCB_CONNECTOR_TV_1 : + case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV; + case DCB_CONNECTOR_DMS59_0 : + case DCB_CONNECTOR_DMS59_1 : + case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII; + case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID; + case DCB_CONNECTOR_LVDS : + case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS; + case DCB_CONNECTOR_DMS59_DP0: + case DCB_CONNECTOR_DMS59_DP1: + case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort; + case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP; + case DCB_CONNECTOR_HDMI_0 : + case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA; + default: + break; + } + + return DRM_MODE_CONNECTOR_Unknown; +} + +struct drm_connector * +nouveau_connector_create(struct drm_device *dev, int index) +{ + const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_connector *nv_connector = NULL; + struct drm_connector *connector; + int type, ret = 0; + bool dummy; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + nv_connector = nouveau_connector(connector); + if (nv_connector->index == index) + return connector; + } + + nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); + if (!nv_connector) + return ERR_PTR(-ENOMEM); + + connector = &nv_connector->base; + INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work); + nv_connector->index = index; + + /* attempt to parse vbios connector type and hotplug gpio */ + nv_connector->dcb = olddcb_conn(dev, index); + if (nv_connector->dcb) { + static const u8 hpd[16] = { + 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60, + }; + + u32 entry = ROM16(nv_connector->dcb[0]); + if (olddcb_conntab(dev)[3] >= 4) + entry |= (u32)ROM16(nv_connector->dcb[2]) << 16; + + ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)], + DCB_GPIO_UNUSED, &nv_connector->hpd); + nv_connector->hpd_func.func = nouveau_connector_hotplug; + if (ret) + nv_connector->hpd.func = DCB_GPIO_UNUSED; + + nv_connector->type = nv_connector->dcb[0]; + if 
(drm_conntype_from_dcb(nv_connector->type) == + DRM_MODE_CONNECTOR_Unknown) { + NV_WARN(drm, "unknown connector type %02x\n", + nv_connector->type); + nv_connector->type = DCB_CONNECTOR_NONE; + } + + /* Gigabyte NX85T */ + if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) { + if (nv_connector->type == DCB_CONNECTOR_HDMI_1) + nv_connector->type = DCB_CONNECTOR_DVI_I; + } + + /* Gigabyte GV-NX86T512H */ + if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) { + if (nv_connector->type == DCB_CONNECTOR_HDMI_1) + nv_connector->type = DCB_CONNECTOR_DVI_I; + } + } else { + nv_connector->type = DCB_CONNECTOR_NONE; + nv_connector->hpd.func = DCB_GPIO_UNUSED; + } + + /* no vbios data, or an unknown dcb connector type - attempt to + * figure out something suitable ourselves + */ + if (nv_connector->type == DCB_CONNECTOR_NONE) { + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcbt = &drm->vbios.dcb; + u32 encoders = 0; + int i; + + for (i = 0; i < dcbt->entries; i++) { + if (dcbt->entry[i].connector == nv_connector->index) + encoders |= (1 << dcbt->entry[i].type); + } + + if (encoders & (1 << DCB_OUTPUT_DP)) { + if (encoders & (1 << DCB_OUTPUT_TMDS)) + nv_connector->type = DCB_CONNECTOR_DP; + else + nv_connector->type = DCB_CONNECTOR_eDP; + } else + if (encoders & (1 << DCB_OUTPUT_TMDS)) { + if (encoders & (1 << DCB_OUTPUT_ANALOG)) + nv_connector->type = DCB_CONNECTOR_DVI_I; + else + nv_connector->type = DCB_CONNECTOR_DVI_D; + } else + if (encoders & (1 << DCB_OUTPUT_ANALOG)) { + nv_connector->type = DCB_CONNECTOR_VGA; + } else + if (encoders & (1 << DCB_OUTPUT_LVDS)) { + nv_connector->type = DCB_CONNECTOR_LVDS; + } else + if (encoders & (1 << DCB_OUTPUT_TV)) { + nv_connector->type = DCB_CONNECTOR_TV_0; + } + } + + type = drm_conntype_from_dcb(nv_connector->type); + if (type == DRM_MODE_CONNECTOR_LVDS) { + ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy); + if (ret) { + NV_ERROR(drm, "Error parsing LVDS table, disabling\n"); + kfree(nv_connector); + return ERR_PTR(ret); + } + + funcs = &nouveau_connector_funcs_lvds; + } else { + funcs = &nouveau_connector_funcs; + } + + /* defaults, will get overridden in detect() */ + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + drm_connector_init(dev, connector, funcs, type); + drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); + + /* Init DVI-I specific properties */ + if (nv_connector->type == DCB_CONNECTOR_DVI_I) + drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0); + + /* Add overscan compensation options to digital outputs */ + if (disp->underscan_property && + (type == DRM_MODE_CONNECTOR_DVID || + type == DRM_MODE_CONNECTOR_DVII || + type == DRM_MODE_CONNECTOR_HDMIA || + type == DRM_MODE_CONNECTOR_DisplayPort)) { + drm_object_attach_property(&connector->base, + disp->underscan_property, + UNDERSCAN_OFF); + drm_object_attach_property(&connector->base, + disp->underscan_hborder_property, + 0); + drm_object_attach_property(&connector->base, + disp->underscan_vborder_property, + 0); + } + + /* Add hue and saturation options */ + if (disp->vibrant_hue_property) + drm_object_attach_property(&connector->base, + disp->vibrant_hue_property, + 90); + if (disp->color_vibrance_property) + drm_object_attach_property(&connector->base, + disp->color_vibrance_property, + 150); + + switch (nv_connector->type) { + case DCB_CONNECTOR_VGA: + if (nv_device(drm->device)->card_type >= NV_50) { + drm_object_attach_property(&connector->base, + 
dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); + } + /* fall-through */ + case DCB_CONNECTOR_TV_0: + case DCB_CONNECTOR_TV_1: + case DCB_CONNECTOR_TV_3: + nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; + break; + default: + nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; + + drm_object_attach_property(&connector->base, + dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); + if (disp->dithering_mode) { + nv_connector->dithering_mode = DITHERING_MODE_AUTO; + drm_object_attach_property(&connector->base, + disp->dithering_mode, + nv_connector->dithering_mode); + } + if (disp->dithering_depth) { + nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; + drm_object_attach_property(&connector->base, + disp->dithering_depth, + nv_connector->dithering_depth); + } + break; + } + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (nv_connector->hpd.func != DCB_GPIO_UNUSED) + connector->polled = DRM_CONNECTOR_POLL_HPD; + + drm_sysfs_connector_add(connector); + return connector; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h new file mode 100644 index 0000000..6e399aa --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NOUVEAU_CONNECTOR_H__ +#define __NOUVEAU_CONNECTOR_H__ + +#include <drm/drm_edid.h> +#include "nouveau_crtc.h" + +#include <core/event.h> + +#include <subdev/bios.h> +#include <subdev/bios/gpio.h> + +struct nouveau_i2c_port; + +enum nouveau_underscan_type { + UNDERSCAN_OFF, + UNDERSCAN_ON, + UNDERSCAN_AUTO, +}; + +/* the enum values specifically defined here match nv50/nvd0 hw values, and + * the code relies on this + */ +enum nouveau_dithering_mode { + DITHERING_MODE_OFF = 0x00, + DITHERING_MODE_ON = 0x01, + DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON, + DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON, + DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON, + DITHERING_MODE_AUTO +}; + +enum nouveau_dithering_depth { + DITHERING_DEPTH_6BPC = 0x00, + DITHERING_DEPTH_8BPC = 0x02, + DITHERING_DEPTH_AUTO +}; + +struct nouveau_connector { + struct drm_connector base; + enum dcb_connector_type type; + u8 index; + u8 *dcb; + + struct dcb_gpio_func hpd; + struct work_struct hpd_work; + struct nouveau_eventh hpd_func; + + int dithering_mode; + int dithering_depth; + int scaling_mode; + enum nouveau_underscan_type underscan; + u32 underscan_hborder; + u32 underscan_vborder; + + struct nouveau_encoder *detected_encoder; + struct edid *edid; + struct drm_display_mode *native_mode; +}; + +static inline struct nouveau_connector *nouveau_connector( + struct drm_connector *con) +{ + return container_of(con, struct nouveau_connector, base); +} + +static inline struct nouveau_connector * +nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct drm_connector *connector; + struct drm_crtc *crtc = to_drm_crtc(nv_crtc); + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder && connector->encoder->crtc == crtc) + return nouveau_connector(connector); + } + + return NULL; +} + +struct drm_connector * +nouveau_connector_create(struct drm_device *, int index); + +int +nouveau_connector_bpp(struct drm_connector *); + +#endif /* __NOUVEAU_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h new file mode 100644 index 0000000..d1e5890 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NOUVEAU_CRTC_H__ +#define __NOUVEAU_CRTC_H__ + +struct nouveau_crtc { + struct drm_crtc base; + + int index; + + uint32_t dpms_saved_fp_control; + uint32_t fp_users; + int saturation; + int color_vibrance; + int vibrant_hue; + int sharpness; + int last_dpms; + + int cursor_saved_x, cursor_saved_y; + + struct { + int cpp; + bool blanked; + uint32_t offset; + uint32_t tile_flags; + } fb; + + struct { + struct nouveau_bo *nvbo; + bool visible; + uint32_t offset; + void (*set_offset)(struct nouveau_crtc *, uint32_t offset); + void (*set_pos)(struct nouveau_crtc *, int x, int y); + void (*hide)(struct nouveau_crtc *, bool update); + void (*show)(struct nouveau_crtc *, bool update); + } cursor; + + struct { + struct nouveau_bo *nvbo; + uint16_t r[256]; + uint16_t g[256]; + uint16_t b[256]; + int depth; + } lut; + + int (*set_dither)(struct nouveau_crtc *crtc, bool update); + int (*set_scale)(struct nouveau_crtc *crtc, bool update); + int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update); +}; + +static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc) +{ + return container_of(crtc, struct nouveau_crtc, base); +} + +static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc) +{ + return &crtc->base; +} + +int nv04_cursor_init(struct nouveau_crtc *); + +#endif /* __NOUVEAU_CRTC_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c new file mode 100644 index 0000000..5392e07 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2009 Red Hat <bskeggs@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +/* + * Authors: + * Ben Skeggs <bskeggs@redhat.com> + */ + +#include "nouveau_debugfs.h" +#include "nouveau_drm.h" + +static int +nouveau_debugfs_vbios_image(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct nouveau_drm *drm = nouveau_drm(node->minor->dev); + int i; + + for (i = 0; i < drm->vbios.length; i++) + seq_printf(m, "%c", drm->vbios.data[i]); + return 0; +} + +static struct drm_info_list nouveau_debugfs_list[] = { + { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, +}; +#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) + +int +nouveau_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + return 0; +} + +void +nouveau_debugfs_takedown(struct drm_minor *minor) +{ + drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, + minor); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h new file mode 100644 index 0000000..a62af6f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h @@ -0,0 +1,22 @@ +#ifndef __NOUVEAU_DEBUGFS_H__ +#define __NOUVEAU_DEBUGFS_H__ + +#include <drm/drmP.h> + +#if defined(CONFIG_DEBUG_FS) +extern int nouveau_debugfs_init(struct drm_minor *); +extern void nouveau_debugfs_takedown(struct drm_minor *); +#else +static inline int +nouveau_debugfs_init(struct drm_minor *minor) +{ + return 0; +} + +static inline void nouveau_debugfs_takedown(struct drm_minor *minor) +{ +} + +#endif + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c new file mode 100644 index 0000000..f17dc2a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -0,0 +1,714 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_fbcon.h" +#include "dispnv04/hw.h" +#include "nouveau_crtc.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_connector.h" +#include "nv50_display.h" + +#include "nouveau_fence.h" + +#include <subdev/bios/gpio.h> +#include <subdev/gpio.h> +#include <engine/disp.h> + +#include <core/class.h> + +static void +nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) +{ + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + + if (fb->nvbo) + drm_gem_object_unreference_unlocked(fb->nvbo->gem); + + drm_framebuffer_cleanup(drm_fb); + kfree(fb); +} + +static int +nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + + return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle); +} + +static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = { + .destroy = nouveau_user_framebuffer_destroy, + .create_handle = nouveau_user_framebuffer_create_handle, +}; + +int +nouveau_framebuffer_init(struct drm_device *dev, + struct nouveau_framebuffer *nv_fb, + struct drm_mode_fb_cmd2 *mode_cmd, + struct nouveau_bo *nvbo) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_framebuffer *fb = &nv_fb->base; + int ret; + + drm_helper_mode_fill_fb_struct(fb, mode_cmd); + nv_fb->nvbo = nvbo; + + if (nv_device(drm->device)->card_type >= NV_50) { + u32 tile_flags = nouveau_bo_tile_layout(nvbo); + if (tile_flags == 0x7a00 || + tile_flags == 0xfe00) + nv_fb->r_dma = NvEvoFB32; + else + if (tile_flags == 0x7000) + nv_fb->r_dma = NvEvoFB16; + else + nv_fb->r_dma = NvEvoVRAM_LP; + + switch (fb->depth) { + case 8: nv_fb->r_format = 0x1e00; break; + case 15: nv_fb->r_format = 0xe900; break; + case 16: nv_fb->r_format = 0xe800; break; + case 24: + case 32: nv_fb->r_format = 0xcf00; break; + case 30: nv_fb->r_format = 0xd100; break; + default: + NV_ERROR(drm, "unknown depth %d\n", fb->depth); + return -EINVAL; + } + + if (nv_device(drm->device)->chipset == 0x50) + nv_fb->r_format |= (tile_flags << 8); + + if (!tile_flags) { + if (nv_device(drm->device)->card_type < NV_D0) + nv_fb->r_pitch = 0x00100000 | fb->pitches[0]; + else + nv_fb->r_pitch = 0x01000000 | fb->pitches[0]; + } else { + u32 mode = nvbo->tile_mode; + if (nv_device(drm->device)->card_type >= NV_C0) + mode >>= 4; + nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode; + } + } + + ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); + if (ret) { + return ret; + } + + return 0; +} + +static struct drm_framebuffer * +nouveau_user_framebuffer_create(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct nouveau_framebuffer *nouveau_fb; + struct drm_gem_object *gem; + int ret; + + gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); + if (!gem) + return ERR_PTR(-ENOENT); + + nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); + if (!nouveau_fb) + return ERR_PTR(-ENOMEM); + + ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); + if (ret) { + drm_gem_object_unreference(gem); + return ERR_PTR(ret); + } + + return &nouveau_fb->base; +} + +static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { + .fb_create = nouveau_user_framebuffer_create, + .output_poll_changed = nouveau_fbcon_output_poll_changed, +}; + + +struct nouveau_drm_prop_enum_list { + u8 
gen_mask; + int type; + char *name; +}; + +static struct nouveau_drm_prop_enum_list underscan[] = { + { 6, UNDERSCAN_AUTO, "auto" }, + { 6, UNDERSCAN_OFF, "off" }, + { 6, UNDERSCAN_ON, "on" }, + {} +}; + +static struct nouveau_drm_prop_enum_list dither_mode[] = { + { 7, DITHERING_MODE_AUTO, "auto" }, + { 7, DITHERING_MODE_OFF, "off" }, + { 1, DITHERING_MODE_ON, "on" }, + { 6, DITHERING_MODE_STATIC2X2, "static 2x2" }, + { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" }, + { 4, DITHERING_MODE_TEMPORAL, "temporal" }, + {} +}; + +static struct nouveau_drm_prop_enum_list dither_depth[] = { + { 6, DITHERING_DEPTH_AUTO, "auto" }, + { 6, DITHERING_DEPTH_6BPC, "6 bpc" }, + { 6, DITHERING_DEPTH_8BPC, "8 bpc" }, + {} +}; + +#define PROP_ENUM(p,gen,n,list) do { \ + struct nouveau_drm_prop_enum_list *l = (list); \ + int c = 0; \ + while (l->gen_mask) { \ + if (l->gen_mask & (1 << (gen))) \ + c++; \ + l++; \ + } \ + if (c) { \ + p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \ + l = (list); \ + c = 0; \ + while (p && l->gen_mask) { \ + if (l->gen_mask & (1 << (gen))) { \ + drm_property_add_enum(p, c, l->type, l->name); \ + c++; \ + } \ + l++; \ + } \ + } \ +} while(0) + +int +nouveau_display_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct drm_connector *connector; + int ret; + + ret = disp->init(dev); + if (ret) + return ret; + + /* enable polling for external displays */ + drm_kms_helper_poll_enable(dev); + + /* enable hotplug interrupts */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct nouveau_connector *conn = nouveau_connector(connector); + if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) { + nouveau_event_get(gpio->events, conn->hpd.line, + &conn->hpd_func); + } + } + + return ret; +} + +void +nouveau_display_fini(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct drm_connector *connector; + + /* disable hotplug interrupts */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct nouveau_connector *conn = nouveau_connector(connector); + if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) { + nouveau_event_put(gpio->events, conn->hpd.line, + &conn->hpd_func); + } + } + + drm_kms_helper_poll_disable(dev); + disp->fini(dev); +} + +int +nouveau_display_create(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_display *disp; + u32 pclass = dev->pdev->class >> 8; + int ret, gen; + + disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); + if (!disp) + return -ENOMEM; + + drm_mode_config_init(dev); + drm_mode_create_scaling_mode_property(dev); + drm_mode_create_dvi_i_properties(dev); + + if (nv_device(drm->device)->card_type < NV_50) + gen = 0; + else + if (nv_device(drm->device)->card_type < NV_D0) + gen = 1; + else + gen = 2; + + PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode); + PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth); + PROP_ENUM(disp->underscan_property, gen, "underscan", underscan); + + disp->underscan_hborder_property = + drm_property_create_range(dev, 0, "underscan hborder", 0, 128); + + disp->underscan_vborder_property = + drm_property_create_range(dev, 0, "underscan vborder", 0, 128); + + if (gen >= 1) { + /* -90..+90 */ + disp->vibrant_hue_property = 
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180); + + /* -100..+100 */ + disp->color_vibrance_property = + drm_property_create_range(dev, 0, "color vibrance", 0, 200); + } + + dev->mode_config.funcs = &nouveau_mode_config_funcs; + dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1); + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + if (nv_device(drm->device)->card_type < NV_10) { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } else + if (nv_device(drm->device)->card_type < NV_50) { + dev->mode_config.max_width = 4096; + dev->mode_config.max_height = 4096; + } else { + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; + } + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + + drm_kms_helper_poll_init(dev); + drm_kms_helper_poll_disable(dev); + + if (nouveau_modeset == 1 || + (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) { + if (nv_device(drm->device)->card_type < NV_50) + ret = nv04_display_create(dev); + else + ret = nv50_display_create(dev); + if (ret) + goto disp_create_err; + + if (dev->mode_config.num_crtc) { + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); + if (ret) + goto vblank_err; + } + + nouveau_backlight_init(dev); + } + + return 0; + +vblank_err: + disp->dtor(dev); +disp_create_err: + drm_kms_helper_poll_fini(dev); + drm_mode_config_cleanup(dev); + return ret; +} + +void +nouveau_display_destroy(struct drm_device *dev) +{ + struct nouveau_display *disp = nouveau_display(dev); + + nouveau_backlight_exit(dev); + drm_vblank_cleanup(dev); + + drm_kms_helper_poll_fini(dev); + drm_mode_config_cleanup(dev); + + if (disp->dtor) + disp->dtor(dev); + + nouveau_drm(dev)->display = NULL; + kfree(disp); +} + +int +nouveau_display_suspend(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_crtc *crtc; + + nouveau_display_fini(dev); + + NV_INFO(drm, "unpinning framebuffer(s)...\n"); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_framebuffer *nouveau_fb; + + nouveau_fb = nouveau_framebuffer(crtc->fb); + if (!nouveau_fb || !nouveau_fb->nvbo) + continue; + + nouveau_bo_unpin(nouveau_fb->nvbo); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + nouveau_bo_unpin(nv_crtc->cursor.nvbo); + } + + return 0; +} + +void +nouveau_display_resume(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_crtc *crtc; + int ret; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_framebuffer *nouveau_fb; + + nouveau_fb = nouveau_framebuffer(crtc->fb); + if (!nouveau_fb || !nouveau_fb->nvbo) + continue; + + nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) + ret = nouveau_bo_map(nv_crtc->cursor.nvbo); + if (ret) + NV_ERROR(drm, "Could not pin/map cursor.\n"); + } + + nouveau_fbcon_set_suspend(dev, 0); + nouveau_fbcon_zfill_all(dev); + + nouveau_display_init(dev); + + /* Force CLUT to get re-loaded during modeset */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->lut.depth = 0; + } + + drm_helper_resume_force_mode(dev); + + 
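+ /* the cursor buffers were pinned and mapped again above, so reprogram + * each head's cursor offset and put the cursor back at the last saved + * position. + */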
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 offset = nv_crtc->cursor.nvbo->bo.offset; + + nv_crtc->cursor.set_offset(nv_crtc, offset); + nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, + nv_crtc->cursor_saved_y); + } +} + +static int +nouveau_page_flip_reserve(struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo) +{ + int ret; + + ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); + if (ret) + goto fail; + + if (likely(old_bo != new_bo)) { + ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); + if (ret) + goto fail_unreserve; + } + + return 0; + +fail_unreserve: + ttm_bo_unreserve(&new_bo->bo); +fail: + nouveau_bo_unpin(new_bo); + return ret; +} + +static void +nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo, + struct nouveau_fence *fence) +{ + nouveau_bo_fence(new_bo, fence); + ttm_bo_unreserve(&new_bo->bo); + + if (likely(old_bo != new_bo)) { + nouveau_bo_fence(old_bo, fence); + ttm_bo_unreserve(&old_bo->bo); + } + + nouveau_bo_unpin(old_bo); +} + +static int +nouveau_page_flip_emit(struct nouveau_channel *chan, + struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo, + struct nouveau_page_flip_state *s, + struct nouveau_fence **pfence) +{ + struct nouveau_fence_chan *fctx = chan->fence; + struct nouveau_drm *drm = chan->drm; + struct drm_device *dev = drm->dev; + unsigned long flags; + int ret; + + /* Queue it to the pending list */ + spin_lock_irqsave(&dev->event_lock, flags); + list_add_tail(&s->head, &fctx->flip); + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* Synchronize with the old framebuffer */ + ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan); + if (ret) + goto fail; + + /* Emit the pageflip */ + ret = RING_SPACE(chan, 3); + if (ret) + goto fail; + + if (nv_device(drm->device)->card_type < NV_C0) { + BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); + OUT_RING (chan, 0x00000000); + OUT_RING (chan, 0x00000000); + } else { + BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1); + OUT_RING (chan, 0); + BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000); + } + FIRE_RING (chan); + + ret = nouveau_fence_new(chan, false, pfence); + if (ret) + goto fail; + + return 0; +fail: + spin_lock_irqsave(&dev->event_lock, flags); + list_del(&s->head); + spin_unlock_irqrestore(&dev->event_lock, flags); + return ret; +} + +int +nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_device *dev = crtc->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; + struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; + struct nouveau_page_flip_state *s; + struct nouveau_channel *chan = NULL; + struct nouveau_fence *fence; + int ret; + + if (!drm->channel) + return -ENODEV; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + /* Don't let the buffers go away while we flip */ + ret = nouveau_page_flip_reserve(old_bo, new_bo); + if (ret) + goto fail_free; + + /* Initialize a page flip struct */ + *s = (struct nouveau_page_flip_state) + { { }, event, nouveau_crtc(crtc)->index, + fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y, + new_bo->bo.offset }; + + /* Choose the channel the flip will be handled in */ + fence = new_bo->bo.sync_obj; + if (fence) + chan = fence->channel; + if (!chan) + chan = drm->channel; + 
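+ /* emitting the flip on the channel that owns the new buffer's last fence + * keeps it ordered behind any rendering still pending for that buffer; + * without such a fence the kernel's own channel is used instead. + */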
mutex_lock(&chan->cli->mutex); + + /* Emit a page flip */ + if (nv_device(drm->device)->card_type >= NV_50) { + ret = nv50_display_flip_next(crtc, fb, chan, 0); + if (ret) { + mutex_unlock(&chan->cli->mutex); + goto fail_unreserve; + } + } + + ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); + mutex_unlock(&chan->cli->mutex); + if (ret) + goto fail_unreserve; + + /* Update the crtc struct and cleanup */ + crtc->fb = fb; + + nouveau_page_flip_unreserve(old_bo, new_bo, fence); + nouveau_fence_unref(&fence); + return 0; + +fail_unreserve: + nouveau_page_flip_unreserve(old_bo, new_bo, NULL); +fail_free: + kfree(s); + return ret; +} + +int +nouveau_finish_page_flip(struct nouveau_channel *chan, + struct nouveau_page_flip_state *ps) +{ + struct nouveau_fence_chan *fctx = chan->fence; + struct nouveau_drm *drm = chan->drm; + struct drm_device *dev = drm->dev; + struct nouveau_page_flip_state *s; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + if (list_empty(&fctx->flip)) { + NV_ERROR(drm, "unexpected pageflip\n"); + spin_unlock_irqrestore(&dev->event_lock, flags); + return -EINVAL; + } + + s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); + if (s->event) + drm_send_vblank_event(dev, -1, s->event); + + list_del(&s->head); + if (ps) + *ps = *s; + kfree(s); + + spin_unlock_irqrestore(&dev->event_lock, flags); + return 0; +} + +int +nouveau_flip_complete(void *data) +{ + struct nouveau_channel *chan = data; + struct nouveau_drm *drm = chan->drm; + struct nouveau_page_flip_state state; + + if (!nouveau_finish_page_flip(chan, &state)) { + if (nv_device(drm->device)->card_type < NV_50) { + nv_set_crtc_base(drm->dev, state.crtc, state.offset + + state.y * state.pitch + + state.x * state.bpp / 8); + } + } + + return 0; +} + +int +nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct nouveau_bo *bo; + int ret; + + args->pitch = roundup(args->width * (args->bpp / 8), 256); + args->size = args->pitch * args->height; + args->size = roundup(args->size, PAGE_SIZE); + + ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo); + if (ret) + return ret; + + ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); + drm_gem_object_unreference_unlocked(bo->gem); + return ret; +} + +int +nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file_priv, handle); +} + +int +nouveau_display_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *poffset) +{ + struct drm_gem_object *gem; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (gem) { + struct nouveau_bo *bo = gem->driver_private; + *poffset = bo->bo.addr_space_offset; + drm_gem_object_unreference_unlocked(gem); + return 0; + } + + return -ENOENT; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h new file mode 100644 index 0000000..aa91ed7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -0,0 +1,91 @@ +#ifndef __NOUVEAU_DISPLAY_H__ +#define __NOUVEAU_DISPLAY_H__ + +#include <subdev/vm.h> + +#include "nouveau_drm.h" + +struct nouveau_framebuffer { + struct drm_framebuffer base; + struct nouveau_bo *nvbo; + struct nouveau_vma vma; + u32 r_dma; + u32 r_format; + u32 r_pitch; +}; + +static inline struct nouveau_framebuffer * +nouveau_framebuffer(struct drm_framebuffer *fb) +{ + return container_of(fb, 
struct nouveau_framebuffer, base); +} + +int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *, + struct drm_mode_fb_cmd2 *, struct nouveau_bo *); + +struct nouveau_page_flip_state { + struct list_head head; + struct drm_pending_vblank_event *event; + int crtc, bpp, pitch, x, y; + u64 offset; +}; + +struct nouveau_display { + void *priv; + void (*dtor)(struct drm_device *); + int (*init)(struct drm_device *); + void (*fini)(struct drm_device *); + + struct drm_property *dithering_mode; + struct drm_property *dithering_depth; + struct drm_property *underscan_property; + struct drm_property *underscan_hborder_property; + struct drm_property *underscan_vborder_property; + /* not really hue and saturation: */ + struct drm_property *vibrant_hue_property; + struct drm_property *color_vibrance_property; +}; + +static inline struct nouveau_display * +nouveau_display(struct drm_device *dev) +{ + return nouveau_drm(dev)->display; +} + +int nouveau_display_create(struct drm_device *dev); +void nouveau_display_destroy(struct drm_device *dev); +int nouveau_display_init(struct drm_device *dev); +void nouveau_display_fini(struct drm_device *dev); +int nouveau_display_suspend(struct drm_device *dev); +void nouveau_display_resume(struct drm_device *dev); + +int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event); +int nouveau_finish_page_flip(struct nouveau_channel *, + struct nouveau_page_flip_state *); + +int nouveau_display_dumb_create(struct drm_file *, struct drm_device *, + struct drm_mode_create_dumb *args); +int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, + u32 handle, u64 *offset); +int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *, + u32 handle); + +void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *); + +#ifdef CPTCFG_DRM_NOUVEAU_BACKLIGHT +extern int nouveau_backlight_init(struct drm_device *); +extern void nouveau_backlight_exit(struct drm_device *); +#else +static inline int +nouveau_backlight_init(struct drm_device *dev) +{ + return 0; +} + +static inline void +nouveau_backlight_exit(struct drm_device *dev) { +} +#endif + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c new file mode 100644 index 0000000..40f91e1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <core/client.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" + +void +OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) +{ + bool is_iomem; + u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem); + mem = &mem[chan->dma.cur]; + if (is_iomem) + memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4); + else + memcpy(mem, data, nr_dwords * 4); + chan->dma.cur += nr_dwords; +} + +/* Fetch and adjust GPU GET pointer + * + * Returns: + * value >= 0, the adjusted GET pointer + * -EINVAL if GET pointer currently outside main push buffer + * -EBUSY if timeout exceeded + */ +static inline int +READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) +{ + uint64_t val; + + val = nv_ro32(chan->object, chan->user_get); + if (chan->user_get_hi) + val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32; + + /* reset counter as long as GET is still advancing, this is + * to avoid misdetecting a GPU lockup if the GPU happens to + * just be processing an operation that takes a long time + */ + if (val != *prev_get) { + *prev_get = val; + *timeout = 0; + } + + if ((++*timeout & 0xff) == 0) { + udelay(1); + if (*timeout > 100000) + return -EBUSY; + } + + if (val < chan->push.vma.offset || + val > chan->push.vma.offset + (chan->dma.max << 2)) + return -EINVAL; + + return (val - chan->push.vma.offset) >> 2; +} + +void +nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, + int delta, int length) +{ + struct nouveau_bo *pb = chan->push.buffer; + struct nouveau_vma *vma; + int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; + u64 offset; + + vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm); + BUG_ON(!vma); + offset = vma->offset + delta; + + BUG_ON(chan->dma.ib_free < 1); + + nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); + nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); + + chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; + + DRM_MEMORYBARRIER(); + /* Flush writes. 
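+ * the read-back of the push buffer below forces the new IB entry out to + * memory before the updated ib_put value is written to the channel, so + * the GPU cannot see the doorbell before the data.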
*/ + nouveau_bo_rd32(pb, 0); + + nv_wo32(chan->object, 0x8c, chan->dma.ib_put); + chan->dma.ib_free--; +} + +static int +nv50_dma_push_wait(struct nouveau_channel *chan, int count) +{ + uint32_t cnt = 0, prev_get = 0; + + while (chan->dma.ib_free < count) { + uint32_t get = nv_ro32(chan->object, 0x88); + if (get != prev_get) { + prev_get = get; + cnt = 0; + } + + if ((++cnt & 0xff) == 0) { + DRM_UDELAY(1); + if (cnt > 100000) + return -EBUSY; + } + + chan->dma.ib_free = get - chan->dma.ib_put; + if (chan->dma.ib_free <= 0) + chan->dma.ib_free += chan->dma.ib_max; + } + + return 0; +} + +static int +nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) +{ + uint64_t prev_get = 0; + int ret, cnt = 0; + + ret = nv50_dma_push_wait(chan, slots + 1); + if (unlikely(ret)) + return ret; + + while (chan->dma.free < count) { + int get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get < 0)) { + if (get == -EINVAL) + continue; + + return get; + } + + if (get <= chan->dma.cur) { + chan->dma.free = chan->dma.max - chan->dma.cur; + if (chan->dma.free >= count) + break; + + FIRE_RING(chan); + do { + get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get < 0)) { + if (get == -EINVAL) + continue; + return get; + } + } while (get == 0); + chan->dma.cur = 0; + chan->dma.put = 0; + } + + chan->dma.free = get - chan->dma.cur - 1; + } + + return 0; +} + +int +nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) +{ + uint64_t prev_get = 0; + int cnt = 0, get; + + if (chan->dma.ib_max) + return nv50_dma_wait(chan, slots, size); + + while (chan->dma.free < size) { + get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get == -EBUSY)) + return -EBUSY; + + /* loop until we have a usable GET pointer. the value + * we read from the GPU may be outside the main ring if + * PFIFO is processing a buffer called from the main ring, + * discard these values until something sensible is seen. + * + * the other case we discard GET is while the GPU is fetching + * from the SKIPS area, so the code below doesn't have to deal + * with some fun corner cases. + */ + if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS) + continue; + + if (get <= chan->dma.cur) { + /* engine is fetching behind us, or is completely + * idle (GET == PUT) so we have free space up until + * the end of the push buffer + * + * we can only hit that path once per call due to + * looping back to the beginning of the push buffer, + * we'll hit the fetching-ahead-of-us path from that + * point on. + * + * the *one* exception to that rule is if we read + * GET==PUT, in which case the below conditional will + * always succeed and break us out of the wait loop. + */ + chan->dma.free = chan->dma.max - chan->dma.cur; + if (chan->dma.free >= size) + break; + + /* not enough space left at the end of the push buffer, + * instruct the GPU to jump back to the start right + * after processing the currently pending commands. + */ + OUT_RING(chan, chan->push.vma.offset | 0x20000000); + + /* wait for GET to depart from the skips area. + * prevents writing GET==PUT and causing a race + * condition that causes us to think the GPU is + * idle when it's not. + */ + do { + get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get == -EBUSY)) + return -EBUSY; + if (unlikely(get == -EINVAL)) + continue; + } while (get <= NOUVEAU_DMA_SKIPS); + WRITE_PUT(NOUVEAU_DMA_SKIPS); + + /* we're now submitting commands at the start of + * the push buffer. 
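+ * both the software CUR and PUT pointers are reset to just past the + * SKIPS area below.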
+ */ + chan->dma.cur = + chan->dma.put = NOUVEAU_DMA_SKIPS; + } + + /* engine fetching ahead of us, we have space up until the + * current GET pointer. the "- 1" is to ensure there's + * space left to emit a jump back to the beginning of the + * push buffer if we require it. we can never get GET == PUT + * here, so this is safe. + */ + chan->dma.free = get - chan->dma.cur - 1; + } + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h new file mode 100644 index 0000000..690d593 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -0,0 +1,211 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_DMA_H__ +#define __NOUVEAU_DMA_H__ + +#include "nouveau_bo.h" +#include "nouveau_chan.h" + +int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); +void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, + int delta, int length); + +/* + * There's a hw race condition where you can't jump to your PUT offset, + * to avoid this we jump to offset + SKIPS and fill the difference with + * NOPs. + * + * xf86-video-nv configures the DMA fetch size to 32 bytes, and uses + * a SKIPS value of 8. Lets assume that the race condition is to do + * with writing into the fetch area, we configure a fetch size of 128 + * bytes so we need a larger SKIPS value. + */ +#define NOUVEAU_DMA_SKIPS (128 / 4) + +/* Hardcoded object assignments to subchannels (subchannel id). */ +enum { + NvSubCtxSurf2D = 0, + NvSubSw = 1, + NvSubImageBlit = 2, + NvSub2D = 3, + NvSubGdiRect = 3, + NvSubCopy = 4, +}; + +/* Object handles. 
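+ * these are the names the driver picks for the dma and engine objects it + * creates itself; the values are arbitrary identifiers and only have to + * match wherever the driver later refers to the same object.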
*/ +enum { + NvM2MF = 0x80000001, + NvDmaFB = 0x80000002, + NvDmaTT = 0x80000003, + NvNotify0 = 0x80000006, + Nv2D = 0x80000007, + NvCtxSurf2D = 0x80000008, + NvRop = 0x80000009, + NvImagePatt = 0x8000000a, + NvClipRect = 0x8000000b, + NvGdiRect = 0x8000000c, + NvImageBlit = 0x8000000d, + NvSw = 0x8000000e, + NvSema = 0x8000000f, + NvEvoSema0 = 0x80000010, + NvEvoSema1 = 0x80000011, + NvNotify1 = 0x80000012, + + /* G80+ display objects */ + NvEvoVRAM = 0x01000000, + NvEvoFB16 = 0x01000001, + NvEvoFB32 = 0x01000002, + NvEvoVRAM_LP = 0x01000003, + NvEvoSync = 0xcafe0000 +}; + +#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 +#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000 +#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000 +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001 +#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180 +#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184 +#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c + +#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 +#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200 +#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c +#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238 +#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c + +static __must_check inline int +RING_SPACE(struct nouveau_channel *chan, int size) +{ + int ret; + + ret = nouveau_dma_wait(chan, 1, size); + if (ret) + return ret; + + chan->dma.free -= size; + return 0; +} + +static inline void +OUT_RING(struct nouveau_channel *chan, int data) +{ + nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data); +} + +extern void +OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords); + +static inline void +BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size) +{ + OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd); +} + +static inline void +BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size) +{ + OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd); +} + +static inline void +BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size) +{ + OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2)); +} + +static inline void +BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size) +{ + OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2)); +} + +static inline void +BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data) +{ + OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2)); +} + +#define WRITE_PUT(val) do { \ + DRM_MEMORYBARRIER(); \ + nouveau_bo_rd32(chan->push.buffer, 0); \ + nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ +} while (0) + +static inline void +FIRE_RING(struct nouveau_channel *chan) +{ + if (chan->dma.cur == chan->dma.put) + return; + chan->accel_done = true; + + if (chan->dma.ib_max) { + nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2, + (chan->dma.cur - chan->dma.put) << 2); + } else { + WRITE_PUT(chan->dma.cur); + } + + chan->dma.put = chan->dma.cur; +} + +static inline void +WIND_RING(struct nouveau_channel *chan) +{ + chan->dma.cur = chan->dma.put; +} + +/* FIFO methods */ +#define NV01_SUBCHAN_OBJECT 0x00000000 +#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010 +#define 
NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014 +#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018 +#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c +#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001 +#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002 +#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004 +#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000 +#define NV84_SUBCHAN_UEVENT 0x00000020 +#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024 +#define NV10_SUBCHAN_REF_CNT 0x00000050 +#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054 +#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060 +#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064 +#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068 +#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c +#define NV40_SUBCHAN_YIELD 0x00000080 + +/* NV_SW object class */ +#define NV_SW_DMA_VBLSEM 0x0000018c +#define NV_SW_VBLSEM_OFFSET 0x00000400 +#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 +#define NV_SW_VBLSEM_RELEASE 0x00000408 +#define NV_SW_PAGE_FLIP 0x00000500 + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c new file mode 100644 index 0000000..36fd225 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -0,0 +1,96 @@ +/* + * Copyright 2009 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> +#include <drm/drm_dp_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" + +#include <core/class.h> + +#include <subdev/gpio.h> +#include <subdev/i2c.h> + +static void +nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch, + u8 *dpcd) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + u8 buf[3]; + + if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) + return; + + if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3)) + NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n", + buf[0], buf[1], buf[2]); + + if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3)) + NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n", + buf[0], buf[1], buf[2]); + +} + +bool +nouveau_dp_detect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_i2c_port *auxch; + u8 *dpcd = nv_encoder->dp.dpcd; + int ret; + + auxch = nv_encoder->i2c; + if (!auxch) + return false; + + ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8); + if (ret) + return false; + + nv_encoder->dp.link_bw = 27000 * dpcd[1]; + nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; + + NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n", + nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]); + NV_DEBUG(drm, "encoder: %dx%d\n", + nv_encoder->dcb->dpconf.link_nr, + nv_encoder->dcb->dpconf.link_bw); + + if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr) + nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; + if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw) + nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw; + + NV_DEBUG(drm, "maximum: %dx%d\n", + nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); + + nouveau_dp_probe_oui(dev, auxch, dpcd); + + return true; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c new file mode 100644 index 0000000..383f4e6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -0,0 +1,799 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <linux/console.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include <core/device.h> +#include <core/client.h> +#include <core/gpuobj.h> +#include <core/class.h> + +#include <engine/device.h> +#include <engine/disp.h> +#include <engine/fifo.h> + +#include <subdev/vm.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_ttm.h" +#include "nouveau_gem.h" +#include "nouveau_agp.h" +#include "nouveau_vga.h" +#include "nouveau_pm.h" +#include "nouveau_acpi.h" +#include "nouveau_bios.h" +#include "nouveau_ioctl.h" +#include "nouveau_abi16.h" +#include "nouveau_fbcon.h" +#include "nouveau_fence.h" +#include "nouveau_debugfs.h" + +MODULE_PARM_DESC(config, "option string to pass to driver core"); +static char *nouveau_config; +module_param_named(config, nouveau_config, charp, 0400); + +MODULE_PARM_DESC(debug, "debug string to pass to driver core"); +static char *nouveau_debug; +module_param_named(debug, nouveau_debug, charp, 0400); + +MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration"); +static int nouveau_noaccel = 0; +module_param_named(noaccel, nouveau_noaccel, int, 0400); + +MODULE_PARM_DESC(modeset, "enable driver (default: auto, " + "0 = disabled, 1 = enabled, 2 = headless)"); +int nouveau_modeset = -1; +module_param_named(modeset, nouveau_modeset, int, 0400); + +static struct drm_driver driver; + +static int +nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) +{ + struct nouveau_drm *drm = + container_of(event, struct nouveau_drm, vblank[head]); + drm_handle_vblank(drm->dev, head); + return NVKM_EVENT_KEEP; +} + +static int +nouveau_drm_vblank_enable(struct drm_device *dev, int head) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_disp *pdisp = nouveau_disp(drm->device); + + if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) + return -EIO; + WARN_ON_ONCE(drm->vblank[head].func); + drm->vblank[head].func = nouveau_drm_vblank_handler; + nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); + return 0; +} + +static void +nouveau_drm_vblank_disable(struct drm_device *dev, int head) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_disp *pdisp = nouveau_disp(drm->device); + if (drm->vblank[head].func) + nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); + else + WARN_ON_ONCE(1); + drm->vblank[head].func = NULL; +} + +static u64 +nouveau_name(struct pci_dev *pdev) +{ + u64 name = (u64)pci_domain_nr(pdev->bus) << 32; + name |= pdev->bus->number << 16; + name |= PCI_SLOT(pdev->devfn) << 8; + return name | PCI_FUNC(pdev->devfn); +} + +static int +nouveau_cli_create(struct pci_dev *pdev, const char *name, + int size, void **pcli) +{ + struct nouveau_cli *cli; + int ret; + + *pcli = NULL; + ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config, + nouveau_debug, size, pcli); + cli = *pcli; + if (ret) { + if (cli) + nouveau_client_destroy(&cli->base); + *pcli = NULL; + return ret; + } + + mutex_init(&cli->mutex); + return 0; +} + +static void +nouveau_cli_destroy(struct nouveau_cli *cli) +{ + struct nouveau_object *client = nv_object(cli); + nouveau_vm_ref(NULL, &cli->base.vm, NULL); + nouveau_client_fini(&cli->base, false); + atomic_set(&client->refcount, 1); + nouveau_object_ref(NULL, &client); +} + +static void +nouveau_accel_fini(struct nouveau_drm *drm) +{ + nouveau_gpuobj_ref(NULL, &drm->notify); + nouveau_channel_del(&drm->channel); + nouveau_channel_del(&drm->cechan); + if (drm->fence) + nouveau_fence(drm)->dtor(drm); +} + +static 
void +nouveau_accel_init(struct nouveau_drm *drm) +{ + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_object *object; + u32 arg0, arg1; + int ret; + + if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/) + return; + + /* initialise synchronisation routines */ + if (device->card_type < NV_10) ret = nv04_fence_create(drm); + else if (device->chipset < 0x17) ret = nv10_fence_create(drm); + else if (device->card_type < NV_50) ret = nv17_fence_create(drm); + else if (device->chipset < 0x84) ret = nv50_fence_create(drm); + else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); + else ret = nvc0_fence_create(drm); + if (ret) { + NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret); + nouveau_accel_fini(drm); + return; + } + + if (device->card_type >= NV_E0) { + ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, + NVDRM_CHAN + 1, + NVE0_CHANNEL_IND_ENGINE_CE0 | + NVE0_CHANNEL_IND_ENGINE_CE1, 0, + &drm->cechan); + if (ret) + NV_ERROR(drm, "failed to create ce channel, %d\n", ret); + + arg0 = NVE0_CHANNEL_IND_ENGINE_GR; + arg1 = 1; + } else { + arg0 = NvDmaFB; + arg1 = NvDmaTT; + } + + ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN, + arg0, arg1, &drm->channel); + if (ret) { + NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); + nouveau_accel_fini(drm); + return; + } + + if (device->card_type < NV_C0) { + ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0, + &drm->notify); + if (ret) { + NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); + nouveau_accel_fini(drm); + return; + } + + ret = nouveau_object_new(nv_object(drm), + drm->channel->handle, NvNotify0, + 0x003d, &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = drm->notify->addr, + .limit = drm->notify->addr + 31 + }, sizeof(struct nv_dma_class), + &object); + if (ret) { + nouveau_accel_fini(drm); + return; + } + } + + + nouveau_bo_move_init(drm); +} + +static int nouveau_drm_probe(struct pci_dev *pdev, + const struct pci_device_id *pent) +{ + struct nouveau_device *device; + struct apertures_struct *aper; + bool boot = false; + int ret; + + /* remove conflicting drivers (vesafb, efifb etc) */ + aper = alloc_apertures(3); + if (!aper) + return -ENOMEM; + + aper->ranges[0].base = pci_resource_start(pdev, 1); + aper->ranges[0].size = pci_resource_len(pdev, 1); + aper->count = 1; + + if (pci_resource_len(pdev, 2)) { + aper->ranges[aper->count].base = pci_resource_start(pdev, 2); + aper->ranges[aper->count].size = pci_resource_len(pdev, 2); + aper->count++; + } + + if (pci_resource_len(pdev, 3)) { + aper->ranges[aper->count].base = pci_resource_start(pdev, 3); + aper->ranges[aper->count].size = pci_resource_len(pdev, 3); + aper->count++; + } + +#ifdef CONFIG_X86 + boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#endif + remove_conflicting_framebuffers(aper, "nouveaufb", boot); + kfree(aper); + + ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), + nouveau_config, nouveau_debug, &device); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = drm_get_pci_dev(pdev, pent, &driver); + if (ret) { + nouveau_object_ref(NULL, (struct nouveau_object **)&device); + return ret; + } + + return 0; +} + +static struct lock_class_key drm_client_lock_class_key; + +static int +nouveau_drm_load(struct drm_device *dev, unsigned long flags) +{ + struct pci_dev *pdev = dev->pdev; + struct nouveau_device *device; + struct nouveau_drm *drm; + int ret; + + ret = nouveau_cli_create(pdev, 
"DRM", sizeof(*drm), (void**)&drm); + if (ret) + return ret; + lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); + + dev->dev_private = drm; + drm->dev = dev; + + INIT_LIST_HEAD(&drm->clients); + spin_lock_init(&drm->tile.lock); + + /* make sure AGP controller is in a consistent state before we + * (possibly) execute vbios init tables (see nouveau_agp.h) + */ + if (drm_pci_device_is_agp(dev) && dev->agp) { + /* dummy device object, doesn't init anything, but allows + * agp code access to registers + */ + ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, + NVDRM_DEVICE, 0x0080, + &(struct nv_device_class) { + .device = ~0, + .disable = + ~(NV_DEVICE_DISABLE_MMIO | + NV_DEVICE_DISABLE_IDENTIFY), + .debug0 = ~0, + }, sizeof(struct nv_device_class), + &drm->device); + if (ret) + goto fail_device; + + nouveau_agp_reset(drm); + nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE); + } + + ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE, + 0x0080, &(struct nv_device_class) { + .device = ~0, + .disable = 0, + .debug0 = 0, + }, sizeof(struct nv_device_class), + &drm->device); + if (ret) + goto fail_device; + + /* workaround an odd issue on nvc1 by disabling the device's + * nosnoop capability. hopefully won't cause issues until a + * better fix is found - assuming there is one... + */ + device = nv_device(drm->device); + if (nv_device(drm->device)->chipset == 0xc1) + nv_mask(device, 0x00088080, 0x00000800, 0x00000000); + + nouveau_vga_init(drm); + nouveau_agp_init(drm); + + if (device->card_type >= NV_50) { + ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), + 0x1000, &drm->client.base.vm); + if (ret) + goto fail_device; + } + + ret = nouveau_ttm_init(drm); + if (ret) + goto fail_ttm; + + ret = nouveau_bios_init(dev); + if (ret) + goto fail_bios; + + ret = nouveau_display_create(dev); + if (ret) + goto fail_dispctor; + + if (dev->mode_config.num_crtc) { + ret = nouveau_display_init(dev); + if (ret) + goto fail_dispinit; + } + + nouveau_pm_init(dev); + + nouveau_accel_init(drm); + nouveau_fbcon_init(dev); + return 0; + +fail_dispinit: + nouveau_display_destroy(dev); +fail_dispctor: + nouveau_bios_takedown(dev); +fail_bios: + nouveau_ttm_fini(drm); +fail_ttm: + nouveau_agp_fini(drm); + nouveau_vga_fini(drm); +fail_device: + nouveau_cli_destroy(&drm->client); + return ret; +} + +static int +nouveau_drm_unload(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + nouveau_fbcon_fini(dev); + nouveau_accel_fini(drm); + + nouveau_pm_fini(dev); + + if (dev->mode_config.num_crtc) + nouveau_display_fini(dev); + nouveau_display_destroy(dev); + + nouveau_bios_takedown(dev); + + nouveau_ttm_fini(drm); + nouveau_agp_fini(drm); + nouveau_vga_fini(drm); + + nouveau_cli_destroy(&drm->client); + return 0; +} + +static void +nouveau_drm_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_object *device; + + device = drm->client.base.device; + drm_put_dev(dev); + + nouveau_object_ref(NULL, &device); + nouveau_object_debug(); +} + +static int +nouveau_do_suspend(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_cli *cli; + int ret; + + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "suspending fbcon...\n"); + nouveau_fbcon_set_suspend(dev, 1); + + NV_INFO(drm, "suspending display...\n"); + ret = nouveau_display_suspend(dev); + if (ret) + return ret; + } + + NV_INFO(drm, "evicting buffers...\n"); + 
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); + + NV_INFO(drm, "waiting for kernel channels to go idle...\n"); + if (drm->cechan) { + ret = nouveau_channel_idle(drm->cechan); + if (ret) + return ret; + } + + if (drm->channel) { + ret = nouveau_channel_idle(drm->channel); + if (ret) + return ret; + } + + NV_INFO(drm, "suspending client object trees...\n"); + if (drm->fence && nouveau_fence(drm)->suspend) { + if (!nouveau_fence(drm)->suspend(drm)) + return -ENOMEM; + } + + list_for_each_entry(cli, &drm->clients, head) { + ret = nouveau_client_fini(&cli->base, true); + if (ret) + goto fail_client; + } + + NV_INFO(drm, "suspending kernel object tree...\n"); + ret = nouveau_client_fini(&drm->client.base, true); + if (ret) + goto fail_client; + + nouveau_agp_fini(drm); + return 0; + +fail_client: + list_for_each_entry_continue_reverse(cli, &drm->clients, head) { + nouveau_client_init(&cli->base); + } + + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "resuming display...\n"); + nouveau_display_resume(dev); + } + return ret; +} + +int nouveau_pmops_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int ret; + + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + ret = nouveau_do_suspend(drm_dev); + if (ret) + return ret; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int +nouveau_do_resume(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_cli *cli; + + NV_INFO(drm, "re-enabling device...\n"); + + nouveau_agp_reset(drm); + + NV_INFO(drm, "resuming kernel object tree...\n"); + nouveau_client_init(&drm->client.base); + nouveau_agp_init(drm); + + NV_INFO(drm, "resuming client object trees...\n"); + if (drm->fence && nouveau_fence(drm)->resume) + nouveau_fence(drm)->resume(drm); + + list_for_each_entry(cli, &drm->clients, head) { + nouveau_client_init(&cli->base); + } + + nouveau_run_vbios_init(dev); + nouveau_pm_resume(dev); + + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "resuming display...\n"); + nouveau_display_resume(dev); + } + return 0; +} + +int nouveau_pmops_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int ret; + + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + return nouveau_do_resume(drm_dev); +} + +static int nouveau_pmops_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return nouveau_do_suspend(drm_dev); +} + +static int nouveau_pmops_thaw(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return nouveau_do_resume(drm_dev); +} + + +static int +nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) +{ + struct pci_dev *pdev = dev->pdev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_cli *cli; + char name[32], tmpname[TASK_COMM_LEN]; + int ret; + + get_task_comm(tmpname, current); + snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); + + ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli); + if (ret) + return ret; + + if (nv_device(drm->device)->card_type >= NV_50) { + ret = nouveau_vm_new(nv_device(drm->device), 0, 
(1ULL << 40), + 0x1000, &cli->base.vm); + if (ret) { + nouveau_cli_destroy(cli); + return ret; + } + } + + fpriv->driver_priv = cli; + + mutex_lock(&drm->client.mutex); + list_add(&cli->head, &drm->clients); + mutex_unlock(&drm->client.mutex); + return 0; +} + +static void +nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv) +{ + struct nouveau_cli *cli = nouveau_cli(fpriv); + struct nouveau_drm *drm = nouveau_drm(dev); + + if (cli->abi16) + nouveau_abi16_fini(cli->abi16); + + mutex_lock(&drm->client.mutex); + list_del(&cli->head); + mutex_unlock(&drm->client.mutex); +} + +static void +nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) +{ + struct nouveau_cli *cli = nouveau_cli(fpriv); + nouveau_cli_destroy(cli); +} + +static struct drm_ioctl_desc +nouveau_ioctls[] = { + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), +}; + +static const struct file_operations +nouveau_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = nouveau_ttm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + .read = drm_read, +#if defined(CONFIG_COMPAT) + .compat_ioctl = nouveau_compat_ioctl, +#endif + .llseek = noop_llseek, +}; + +static struct drm_driver +driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME, + + .load = nouveau_drm_load, + .unload = nouveau_drm_unload, + .open = nouveau_drm_open, + .preclose = nouveau_drm_preclose, + .postclose = nouveau_drm_postclose, + .lastclose = nouveau_vga_lastclose, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = nouveau_debugfs_init, + .debugfs_cleanup = nouveau_debugfs_takedown, +#endif + + .get_vblank_counter = drm_vblank_count, + .enable_vblank = nouveau_drm_vblank_enable, + .disable_vblank = nouveau_drm_vblank_disable, + + .ioctls = nouveau_ioctls, + .fops = &nouveau_driver_fops, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_pin = nouveau_gem_prime_pin, + .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, + .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, + .gem_prime_vmap = nouveau_gem_prime_vmap, + .gem_prime_vunmap = nouveau_gem_prime_vunmap, + + .gem_init_object = nouveau_gem_object_new, + .gem_free_object = 
nouveau_gem_object_del, + .gem_open_object = nouveau_gem_object_open, + .gem_close_object = nouveau_gem_object_close, + + .dumb_create = nouveau_display_dumb_create, + .dumb_map_offset = nouveau_display_dumb_map_offset, + .dumb_destroy = nouveau_display_dumb_destroy, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, +#ifdef GIT_REVISION + .date = GIT_REVISION, +#else + .date = DRIVER_DATE, +#endif + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static struct pci_device_id +nouveau_drm_pci_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + }, + {} +}; + +static const struct dev_pm_ops nouveau_pm_ops = { + .suspend = nouveau_pmops_suspend, + .resume = nouveau_pmops_resume, + .freeze = nouveau_pmops_freeze, + .thaw = nouveau_pmops_thaw, + .poweroff = nouveau_pmops_freeze, + .restore = nouveau_pmops_resume, +}; + +static struct pci_driver +nouveau_drm_pci_driver = { + .name = "nouveau", + .id_table = nouveau_drm_pci_table, + .probe = nouveau_drm_probe, + .remove = nouveau_drm_remove, + .driver.pm = &nouveau_pm_ops, +}; + +static int __init +nouveau_drm_init(void) +{ + driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls); + + if (nouveau_modeset == -1) { +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force()) + nouveau_modeset = 0; +#endif + } + + if (!nouveau_modeset) + return 0; + + nouveau_register_dsm_handler(); + return drm_pci_init(&driver, &nouveau_drm_pci_driver); +} + +static void __exit +nouveau_drm_exit(void) +{ + if (!nouveau_modeset) + return; + + drm_pci_exit(&driver, &nouveau_drm_pci_driver); + nouveau_unregister_dsm_handler(); +} + +module_init(nouveau_drm_init); +module_exit(nouveau_drm_exit); + +MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h new file mode 100644 index 0000000..f2b30f8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -0,0 +1,159 @@ +#ifndef __NOUVEAU_DRMCLI_H__ +#define __NOUVEAU_DRMCLI_H__ + +#define DRIVER_AUTHOR "Nouveau Project" +#define DRIVER_EMAIL "nouveau@lists.freedesktop.org" + +#define DRIVER_NAME "nouveau" +#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla" +#define DRIVER_DATE "20120801" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 1 + +/* + * 1.1.1: + * - added support for tiled system memory buffer objects + * - added support for NOUVEAU_GETPARAM_GRAPH_UNITS on [nvc0,nve0]. + * - added support for compressed memory storage types on [nvc0,nve0]. + * - added support for software methods 0x600,0x644,0x6ac on nvc0 + * to control registers on the MPs to enable performance counters, + * and to control the warp error enable mask (OpenGL requires out of + * bounds access to local memory to be silently ignored / return 0). 
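+ *   Userspace can gate use of these additions on the driver version
+ *   reported by DRM_IOCTL_VERSION (drmGetVersion() in libdrm), i.e.
+ *   require version >= 1.1.1 before relying on them.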
+ */ + +#include <core/client.h> +#include <core/event.h> + +#include <subdev/vm.h> + +#include <drmP.h> +#include <drm/nouveau_drm.h> + +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_memory.h> +#include <drm/ttm/ttm_module.h> +#include <drm/ttm/ttm_page_alloc.h> + +struct nouveau_channel; + +#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) + +#include "nouveau_fence.h" +#include "nouveau_bios.h" + +struct nouveau_drm_tile { + struct nouveau_fence *fence; + bool used; +}; + +enum nouveau_drm_handle { + NVDRM_CLIENT = 0xffffffff, + NVDRM_DEVICE = 0xdddddddd, + NVDRM_PUSH = 0xbbbb0000, /* |= client chid */ + NVDRM_CHAN = 0xcccc0000, /* |= client chid */ +}; + +struct nouveau_cli { + struct nouveau_client base; + struct list_head head; + struct mutex mutex; + void *abi16; +}; + +static inline struct nouveau_cli * +nouveau_cli(struct drm_file *fpriv) +{ + return fpriv ? fpriv->driver_priv : NULL; +} + +struct nouveau_drm { + struct nouveau_cli client; + struct drm_device *dev; + + struct nouveau_object *device; + struct list_head clients; + + struct { + enum { + UNKNOWN = 0, + DISABLE = 1, + ENABLED = 2 + } stat; + u32 base; + u32 size; + } agp; + + /* TTM interface support */ + struct { + struct drm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; + atomic_t validate_sequence; + int (*move)(struct nouveau_channel *, + struct ttm_buffer_object *, + struct ttm_mem_reg *, struct ttm_mem_reg *); + int mtrr; + } ttm; + + /* GEM interface support */ + struct { + u64 vram_available; + u64 gart_available; + } gem; + + /* synchronisation */ + void *fence; + + /* context for accelerated drm-internal operations */ + struct nouveau_channel *cechan; + struct nouveau_channel *channel; + struct nouveau_gpuobj *notify; + struct nouveau_fbdev *fbcon; + + /* nv10-nv40 tiling regions */ + struct { + struct nouveau_drm_tile reg[15]; + spinlock_t lock; + } tile; + + /* modesetting */ + struct nvbios vbios; + struct nouveau_display *display; + struct backlight_device *backlight; + struct nouveau_eventh vblank[4]; + + /* power management */ + struct nouveau_pm *pm; +}; + +static inline struct nouveau_drm * +nouveau_drm(struct drm_device *dev) +{ + return dev->dev_private; +} + +static inline struct nouveau_device * +nouveau_dev(struct drm_device *dev) +{ + return nv_device(nouveau_drm(dev)->device); +} + +int nouveau_pmops_suspend(struct device *); +int nouveau_pmops_resume(struct device *); + +#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) +#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) +#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) +#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args) +#define NV_DEBUG(cli, fmt, args...) do { \ + if (drm_debug & DRM_UT_DRIVER) \ + nv_info((cli), fmt, ##args); \ +} while (0) + +extern int nouveau_modeset; + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h new file mode 100644 index 0000000..24660c0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_ENCODER_H__ +#define __NOUVEAU_ENCODER_H__ + +#include <subdev/bios/dcb.h> + +#include <drm/drm_encoder_slave.h> +#include "dispnv04/disp.h" + +#define NV_DPMS_CLEARED 0x80 + +struct nouveau_i2c_port; + +struct nouveau_encoder { + struct drm_encoder_slave base; + + struct dcb_output *dcb; + int or; + struct nouveau_i2c_port *i2c; + + /* different to drm_encoder.crtc, this reflects what's + * actually programmed on the hw, not the proposed crtc */ + struct drm_crtc *crtc; + + struct drm_display_mode mode; + int last_dpms; + + struct nv04_output_reg restore; + + union { + struct { + u8 dpcd[8]; + int link_nr; + int link_bw; + u32 datarate; + } dp; + }; +}; + +struct nouveau_encoder * +find_encoder(struct drm_connector *connector, int type); + +static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc) +{ + struct drm_encoder_slave *slave = to_encoder_slave(enc); + + return container_of(slave, struct nouveau_encoder, base); +} + +static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) +{ + return &enc->base.base; +} + +static inline struct drm_encoder_slave_funcs * +get_slave_funcs(struct drm_encoder *enc) +{ + return to_encoder_slave(enc)->slave_funcs; +} + +/* nouveau_dp.c */ +bool nouveau_dp_detect(struct drm_encoder *); +void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, + struct nouveau_object *); + +struct nouveau_connector * +nouveau_encoder_connector_get(struct nouveau_encoder *encoder); + +#endif /* __NOUVEAU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c new file mode 100644 index 0000000..b035317 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -0,0 +1,528 @@ +/* + * Copyright © 2007 David Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions 
of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * David Airlie + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/tty.h> +#include <linux/sysrq.h> +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/screen_info.h> +#include <linux/vga_switcheroo.h> +#include <linux/console.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_gem.h" +#include "nouveau_bo.h" +#include "nouveau_fbcon.h" +#include "nouveau_chan.h" + +#include "nouveau_crtc.h" + +#include <core/client.h> +#include <core/device.h> + +#include <subdev/fb.h> + +MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); +static int nouveau_nofbaccel = 0; +module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); + +static void +nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + struct nouveau_device *device = nv_device(drm->device); + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&drm->client.mutex)) { + if (device->card_type < NV_50) + ret = nv04_fbcon_fillrect(info, rect); + else + if (device->card_type < NV_C0) + ret = nv50_fbcon_fillrect(info, rect); + else + ret = nvc0_fbcon_fillrect(info, rect); + mutex_unlock(&drm->client.mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_fillrect(info, rect); +} + +static void +nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + struct nouveau_device *device = nv_device(drm->device); + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&drm->client.mutex)) { + if (device->card_type < NV_50) + ret = nv04_fbcon_copyarea(info, image); + else + if (device->card_type < NV_C0) + ret = nv50_fbcon_copyarea(info, image); + else + ret = nvc0_fbcon_copyarea(info, image); + mutex_unlock(&drm->client.mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_copyarea(info, image); +} + +static void +nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + struct nouveau_device *device = nv_device(drm->device); + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&drm->client.mutex)) { + if (device->card_type < NV_50) + ret = nv04_fbcon_imageblit(info, image); + 
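+		/* accel backend is picked by card generation: nv04 paths for
+		 * pre-NV50 chips, nv50 for the Tesla family, nvc0 for Fermi
+		 * and newer; if none succeeds we fall back to cfb_imageblit()
+		 * below */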
else + if (device->card_type < NV_C0) + ret = nv50_fbcon_imageblit(info, image); + else + ret = nvc0_fbcon_imageblit(info, image); + mutex_unlock(&drm->client.mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_imageblit(info, image); +} + +static int +nouveau_fbcon_sync(struct fb_info *info) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + if (!chan || !chan->accel_done || in_interrupt() || + info->state != FBINFO_STATE_RUNNING || + info->flags & FBINFO_HWACCEL_DISABLED) + return 0; + + if (!mutex_trylock(&drm->client.mutex)) + return 0; + + ret = nouveau_channel_idle(chan); + mutex_unlock(&drm->client.mutex); + if (ret) { + nouveau_fbcon_gpu_lockup(info); + return 0; + } + + chan->accel_done = false; + return 0; +} + +static struct fb_ops nouveau_fbcon_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = nouveau_fbcon_fillrect, + .fb_copyarea = nouveau_fbcon_copyarea, + .fb_imageblit = nouveau_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static struct fb_ops nouveau_fbcon_sw_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + nv_crtc->lut.r[regno] = red; + nv_crtc->lut.g[regno] = green; + nv_crtc->lut.b[regno] = blue; +} + +static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + + *red = nv_crtc->lut.r[regno]; + *green = nv_crtc->lut.g[regno]; + *blue = nv_crtc->lut.b[regno]; +} + +static void +nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon) +{ + struct fb_info *info = fbcon->helper.fbdev; + struct fb_fillrect rect; + + /* Clear the entire fbcon. The drm will program every connector + * with it's preferred mode. If the sizes differ, one display will + * quite likely have garbage around the console. 
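+	 * The fill uses the virtual resolution (xres_virtual/yres_virtual),
+	 * so any region outside the currently visible mode is cleared too.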
+ */ + rect.dx = rect.dy = 0; + rect.width = info->var.xres_virtual; + rect.height = info->var.yres_virtual; + rect.color = 0; + rect.rop = ROP_COPY; + info->fbops->fb_fillrect(info, &rect); +} + +static int +nouveau_fbcon_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper; + struct drm_device *dev = fbcon->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nv_device(drm->device); + struct fb_info *info; + struct drm_framebuffer *fb; + struct nouveau_framebuffer *nouveau_fb; + struct nouveau_channel *chan; + struct nouveau_bo *nvbo; + struct drm_mode_fb_cmd2 mode_cmd; + struct pci_dev *pdev = dev->pdev; + int size, ret; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + + mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3); + mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256); + + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + size = mode_cmd.pitches[0] * mode_cmd.height; + size = roundup(size, PAGE_SIZE); + + ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM, + 0, 0x0000, &nvbo); + if (ret) { + NV_ERROR(drm, "failed to allocate framebuffer\n"); + goto out; + } + + ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(drm, "failed to pin fb: %d\n", ret); + nouveau_bo_ref(NULL, &nvbo); + goto out; + } + + ret = nouveau_bo_map(nvbo); + if (ret) { + NV_ERROR(drm, "failed to map fb: %d\n", ret); + nouveau_bo_unpin(nvbo); + nouveau_bo_ref(NULL, &nvbo); + goto out; + } + + chan = nouveau_nofbaccel ? NULL : drm->channel; + if (chan && device->card_type >= NV_50) { + ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm, + &fbcon->nouveau_fb.vma); + if (ret) { + NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); + chan = NULL; + } + } + + mutex_lock(&dev->struct_mutex); + + info = framebuffer_alloc(0, &pdev->dev); + if (!info) { + ret = -ENOMEM; + goto out_unref; + } + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + ret = -ENOMEM; + goto out_unref; + } + + info->par = fbcon; + + nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo); + + nouveau_fb = &fbcon->nouveau_fb; + fb = &nouveau_fb->base; + + /* setup helper */ + fbcon->helper.fb = fb; + fbcon->helper.fbdev = info; + + strcpy(info->fix.id, "nouveaufb"); + if (nouveau_nofbaccel) + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; + else + info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | + FBINFO_HWACCEL_IMAGEBLIT; + info->flags |= FBINFO_CAN_FORCE_OUTPUT; + info->fbops = &nouveau_fbcon_sw_ops; + info->fix.smem_start = nvbo->bo.mem.bus.base + + nvbo->bo.mem.bus.offset; + info->fix.smem_len = size; + + info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); + info->screen_size = size; + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height); + + /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ + + mutex_unlock(&dev->struct_mutex); + + if (chan) { + ret = -ENODEV; + if (device->card_type < NV_50) + ret = nv04_fbcon_accel_init(info); + else + if (device->card_type < NV_C0) + ret = nv50_fbcon_accel_init(info); + else + ret = nvc0_fbcon_accel_init(info); + + if (ret == 0) + info->fbops = &nouveau_fbcon_ops; + } + + nouveau_fbcon_zfill(dev, fbcon); + + /* To allow resizeing without swapping buffers */ + 
NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", + nouveau_fb->base.width, nouveau_fb->base.height, + nvbo->bo.offset, nvbo); + + vga_switcheroo_client_fb_set(dev->pdev, info); + return 0; + +out_unref: + mutex_unlock(&dev->struct_mutex); +out: + return ret; +} + +void +nouveau_fbcon_output_poll_changed(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + drm_fb_helper_hotplug_event(&drm->fbcon->helper); +} + +static int +nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) +{ + struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb; + struct fb_info *info; + + if (fbcon->helper.fbdev) { + info = fbcon->helper.fbdev; + unregister_framebuffer(info); + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + } + + if (nouveau_fb->nvbo) { + nouveau_bo_unmap(nouveau_fb->nvbo); + nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma); + drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); + nouveau_fb->nvbo = NULL; + } + drm_fb_helper_fini(&fbcon->helper); + drm_framebuffer_unregister_private(&nouveau_fb->base); + drm_framebuffer_cleanup(&nouveau_fb->base); + return 0; +} + +void nouveau_fbcon_gpu_lockup(struct fb_info *info) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + + NV_ERROR(drm, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; +} + +static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { + .gamma_set = nouveau_fbcon_gamma_set, + .gamma_get = nouveau_fbcon_gamma_get, + .fb_probe = nouveau_fbcon_create, +}; + + +int +nouveau_fbcon_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fbdev *fbcon; + int preferred_bpp; + int ret; + + if (!dev->mode_config.num_crtc) + return 0; + + fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); + if (!fbcon) + return -ENOMEM; + + fbcon->dev = dev; + drm->fbcon = fbcon; + fbcon->helper.funcs = &nouveau_fbcon_helper_funcs; + + ret = drm_fb_helper_init(dev, &fbcon->helper, + dev->mode_config.num_crtc, 4); + if (ret) { + kfree(fbcon); + return ret; + } + + drm_fb_helper_single_add_all_connectors(&fbcon->helper); + + if (pfb->ram.size <= 32 * 1024 * 1024) + preferred_bpp = 8; + else + if (pfb->ram.size <= 64 * 1024 * 1024) + preferred_bpp = 16; + else + preferred_bpp = 32; + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(dev); + + drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); + return 0; +} + +void +nouveau_fbcon_fini(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (!drm->fbcon) + return; + + nouveau_fbcon_destroy(dev, drm->fbcon); + kfree(drm->fbcon); + drm->fbcon = NULL; +} + +void nouveau_fbcon_save_disable_accel(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; + drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; +} + +void nouveau_fbcon_restore_accel(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; +} + +void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + console_lock(); + if (state == 0) + nouveau_fbcon_save_disable_accel(dev); + fb_set_suspend(drm->fbcon->helper.fbdev, state); + if (state == 1) + 
nouveau_fbcon_restore_accel(dev); + console_unlock(); +} + +void nouveau_fbcon_zfill_all(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + nouveau_fbcon_zfill(dev, drm->fbcon); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h new file mode 100644 index 0000000..fdfc0c9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_FBCON_H__ +#define __NOUVEAU_FBCON_H__ + +#include <drm/drm_fb_helper.h> + +#include "nouveau_display.h" + +struct nouveau_fbdev { + struct drm_fb_helper helper; + struct nouveau_framebuffer nouveau_fb; + struct list_head fbdev_list; + struct drm_device *dev; + unsigned int saved_flags; +}; + +void nouveau_fbcon_restore(void); + +int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nv04_fbcon_accel_init(struct fb_info *info); + +int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nv50_fbcon_accel_init(struct fb_info *info); + +int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nvc0_fbcon_accel_init(struct fb_info *info); + +void nouveau_fbcon_gpu_lockup(struct fb_info *info); + +int nouveau_fbcon_init(struct drm_device *dev); +void nouveau_fbcon_fini(struct drm_device *dev); +void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); +void nouveau_fbcon_zfill_all(struct drm_device *dev); +void nouveau_fbcon_save_disable_accel(struct drm_device *dev); +void nouveau_fbcon_restore_accel(struct drm_device *dev); + +void nouveau_fbcon_output_poll_changed(struct drm_device *dev); +#endif /* __NV50_FBCON_H__ */ + diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c new file mode 100644 index 0000000..6c94683 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -0,0 +1,277 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <drm/drmP.h> + +#include <linux/ktime.h> +#include <linux/hrtimer.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" + +#include <engine/fifo.h> + +void +nouveau_fence_context_del(struct nouveau_fence_chan *fctx) +{ + struct nouveau_fence *fence, *fnext; + spin_lock(&fctx->lock); + list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { + fence->channel = NULL; + list_del(&fence->head); + nouveau_fence_unref(&fence); + } + spin_unlock(&fctx->lock); +} + +void +nouveau_fence_context_new(struct nouveau_fence_chan *fctx) +{ + INIT_LIST_HEAD(&fctx->flip); + INIT_LIST_HEAD(&fctx->pending); + spin_lock_init(&fctx->lock); +} + +static void +nouveau_fence_update(struct nouveau_channel *chan) +{ + struct nouveau_fence_chan *fctx = chan->fence; + struct nouveau_fence *fence, *fnext; + + spin_lock(&fctx->lock); + list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { + if (fctx->read(chan) < fence->sequence) + break; + + fence->channel = NULL; + list_del(&fence->head); + nouveau_fence_unref(&fence); + } + spin_unlock(&fctx->lock); +} + +int +nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) +{ + struct nouveau_fence_chan *fctx = chan->fence; + int ret; + + fence->channel = chan; + fence->timeout = jiffies + (3 * DRM_HZ); + fence->sequence = ++fctx->sequence; + + ret = fctx->emit(fence); + if (!ret) { + kref_get(&fence->kref); + spin_lock(&fctx->lock); + list_add_tail(&fence->head, &fctx->pending); + spin_unlock(&fctx->lock); + } + + return ret; +} + +bool +nouveau_fence_done(struct nouveau_fence *fence) +{ + if (fence->channel) + nouveau_fence_update(fence->channel); + return !fence->channel; +} + +struct nouveau_fence_uevent { + struct nouveau_eventh handler; + struct nouveau_fence_priv *priv; +}; + +static int +nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index) +{ + struct nouveau_fence_uevent *uevent = + container_of(event, struct nouveau_fence_uevent, handler); + wake_up_all(&uevent->priv->waiting); + return NVKM_EVENT_KEEP; +} + +static int +nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr) + +{ + struct nouveau_channel *chan = fence->channel; + struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device); + struct 
nouveau_fence_priv *priv = chan->drm->fence; + struct nouveau_fence_uevent uevent = { + .handler.func = nouveau_fence_wait_uevent_handler, + .priv = priv, + }; + int ret = 0; + + nouveau_event_get(pfifo->uevent, 0, &uevent.handler); + + if (fence->timeout) { + unsigned long timeout = fence->timeout - jiffies; + + if (time_before(jiffies, fence->timeout)) { + if (intr) { + ret = wait_event_interruptible_timeout( + priv->waiting, + nouveau_fence_done(fence), + timeout); + } else { + ret = wait_event_timeout(priv->waiting, + nouveau_fence_done(fence), + timeout); + } + } + + if (ret >= 0) { + fence->timeout = jiffies + ret; + if (time_after_eq(jiffies, fence->timeout)) + ret = -EBUSY; + } + } else { + if (intr) { + ret = wait_event_interruptible(priv->waiting, + nouveau_fence_done(fence)); + } else { + wait_event(priv->waiting, nouveau_fence_done(fence)); + } + } + + nouveau_event_put(pfifo->uevent, 0, &uevent.handler); + if (unlikely(ret < 0)) + return ret; + + return 0; +} + +int +nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) +{ + struct nouveau_channel *chan = fence->channel; + struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL; + unsigned long sleep_time = NSEC_PER_MSEC / 1000; + ktime_t t; + int ret = 0; + + while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) { + ret = nouveau_fence_wait_uevent(fence, intr); + if (ret < 0) + return ret; + } + + while (!nouveau_fence_done(fence)) { + if (fence->timeout && time_after_eq(jiffies, fence->timeout)) { + ret = -EBUSY; + break; + } + + __set_current_state(intr ? TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); + if (lazy) { + t = ktime_set(0, sleep_time); + schedule_hrtimeout(&t, HRTIMER_MODE_REL); + sleep_time *= 2; + if (sleep_time > NSEC_PER_MSEC) + sleep_time = NSEC_PER_MSEC; + } + + if (intr && signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + + __set_current_state(TASK_RUNNING); + return ret; +} + +int +nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan) +{ + struct nouveau_fence_chan *fctx = chan->fence; + struct nouveau_channel *prev; + int ret = 0; + + prev = fence ? 
fence->channel : NULL; + if (prev) { + if (unlikely(prev != chan && !nouveau_fence_done(fence))) { + ret = fctx->sync(fence, prev, chan); + if (unlikely(ret)) + ret = nouveau_fence_wait(fence, true, false); + } + } + + return ret; +} + +static void +nouveau_fence_del(struct kref *kref) +{ + struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref); + kfree(fence); +} + +void +nouveau_fence_unref(struct nouveau_fence **pfence) +{ + if (*pfence) + kref_put(&(*pfence)->kref, nouveau_fence_del); + *pfence = NULL; +} + +struct nouveau_fence * +nouveau_fence_ref(struct nouveau_fence *fence) +{ + kref_get(&fence->kref); + return fence; +} + +int +nouveau_fence_new(struct nouveau_channel *chan, bool sysmem, + struct nouveau_fence **pfence) +{ + struct nouveau_fence *fence; + int ret = 0; + + if (unlikely(!chan->fence)) + return -ENODEV; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return -ENOMEM; + + fence->sysmem = sysmem; + kref_init(&fence->kref); + + ret = nouveau_fence_emit(fence, chan); + if (ret) + nouveau_fence_unref(&fence); + + *pfence = fence; + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h new file mode 100644 index 0000000..c899434 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -0,0 +1,96 @@ +#ifndef __NOUVEAU_FENCE_H__ +#define __NOUVEAU_FENCE_H__ + +struct nouveau_drm; + +struct nouveau_fence { + struct list_head head; + struct kref kref; + + bool sysmem; + + struct nouveau_channel *channel; + unsigned long timeout; + u32 sequence; +}; + +int nouveau_fence_new(struct nouveau_channel *, bool sysmem, + struct nouveau_fence **); +struct nouveau_fence * +nouveau_fence_ref(struct nouveau_fence *); +void nouveau_fence_unref(struct nouveau_fence **); + +int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); +bool nouveau_fence_done(struct nouveau_fence *); +int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); +int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); + +struct nouveau_fence_chan { + struct list_head pending; + struct list_head flip; + + int (*emit)(struct nouveau_fence *); + int (*sync)(struct nouveau_fence *, struct nouveau_channel *, + struct nouveau_channel *); + u32 (*read)(struct nouveau_channel *); + int (*emit32)(struct nouveau_channel *, u64, u32); + int (*sync32)(struct nouveau_channel *, u64, u32); + + spinlock_t lock; + u32 sequence; +}; + +struct nouveau_fence_priv { + void (*dtor)(struct nouveau_drm *); + bool (*suspend)(struct nouveau_drm *); + void (*resume)(struct nouveau_drm *); + int (*context_new)(struct nouveau_channel *); + void (*context_del)(struct nouveau_channel *); + + wait_queue_head_t waiting; + bool uevent; +}; + +#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence) + +void nouveau_fence_context_new(struct nouveau_fence_chan *); +void nouveau_fence_context_del(struct nouveau_fence_chan *); + +int nv04_fence_create(struct nouveau_drm *); +int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32); + +int nv10_fence_emit(struct nouveau_fence *); +int nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *, + struct nouveau_channel *); +u32 nv10_fence_read(struct nouveau_channel *); +void nv10_fence_context_del(struct nouveau_channel *); +void nv10_fence_destroy(struct nouveau_drm *); +int nv10_fence_create(struct nouveau_drm *); + +int nv17_fence_create(struct nouveau_drm *); +void nv17_fence_resume(struct nouveau_drm *drm); + +int nv50_fence_create(struct 
nouveau_drm *); +int nv84_fence_create(struct nouveau_drm *); +int nvc0_fence_create(struct nouveau_drm *); + +int nouveau_flip_complete(void *chan); + +struct nv84_fence_chan { + struct nouveau_fence_chan base; + struct nouveau_vma vma; + struct nouveau_vma vma_gart; + struct nouveau_vma dispc_vma[4]; +}; + +struct nv84_fence_priv { + struct nouveau_fence_priv base; + struct nouveau_bo *bo; + struct nouveau_bo *bo_gart; + u32 *suspend; +}; + +u64 nv84_fence_crtc(struct nouveau_channel *, int); +int nv84_fence_context_new(struct nouveau_channel *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c new file mode 100644 index 0000000..b4b4d0c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -0,0 +1,890 @@ +/* + * Copyright (C) 2008 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <subdev/fb.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" +#include "nouveau_abi16.h" + +#include "nouveau_ttm.h" +#include "nouveau_gem.h" + +int +nouveau_gem_object_new(struct drm_gem_object *gem) +{ + return 0; +} + +void +nouveau_gem_object_del(struct drm_gem_object *gem) +{ + struct nouveau_bo *nvbo = gem->driver_private; + struct ttm_buffer_object *bo = &nvbo->bo; + + if (!nvbo) + return; + nvbo->gem = NULL; + + if (unlikely(nvbo->pin_refcnt)) { + nvbo->pin_refcnt = 1; + nouveau_bo_unpin(nvbo); + } + + if (gem->import_attach) + drm_prime_gem_destroy(gem, nvbo->bo.sg); + + ttm_bo_unref(&bo); + + drm_gem_object_release(gem); + kfree(gem); +} + +int +nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) +{ + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_vma *vma; + int ret; + + if (!cli->base.vm) + return 0; + + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + if (ret) + return ret; + + vma = nouveau_bo_vma_find(nvbo, cli->base.vm); + if (!vma) { + vma = kzalloc(sizeof(*vma), GFP_KERNEL); + if (!vma) { + ret = -ENOMEM; + goto out; + } + + ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma); + if (ret) { + kfree(vma); + goto out; + } + } else { + vma->refcount++; + } + +out: + ttm_bo_unreserve(&nvbo->bo); + return ret; +} + +void +nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) +{ + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_vma *vma; + int ret; + + if (!cli->base.vm) + return; + + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + if (ret) + return; + + vma = nouveau_bo_vma_find(nvbo, cli->base.vm); + if (vma) { + if (--vma->refcount == 0) { + nouveau_bo_vma_del(nvbo, vma); + kfree(vma); + } + } + ttm_bo_unreserve(&nvbo->bo); +} + +int +nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, + uint32_t tile_mode, uint32_t tile_flags, + struct nouveau_bo **pnvbo) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bo *nvbo; + u32 flags = 0; + int ret; + + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) + flags |= TTM_PL_FLAG_VRAM; + if (domain & NOUVEAU_GEM_DOMAIN_GART) + flags |= TTM_PL_FLAG_TT; + if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) + flags |= TTM_PL_FLAG_SYSTEM; + + ret = nouveau_bo_new(dev, size, align, flags, tile_mode, + tile_flags, NULL, pnvbo); + if (ret) + return ret; + nvbo = *pnvbo; + + /* we restrict allowed domains on nv50+ to only the types + * that were requested at creation time. not possibly on + * earlier chips without busting the ABI. 
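+	 * e.g. a buffer created with only NOUVEAU_GEM_DOMAIN_VRAM will never
+	 * be validated into GART by a later pushbuf on nv50+, while earlier
+	 * chips keep both VRAM and GART valid.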
+ */ + nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | + NOUVEAU_GEM_DOMAIN_GART; + if (nv_device(drm->device)->card_type >= NV_50) + nvbo->valid_domains &= domain; + + nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); + if (!nvbo->gem) { + nouveau_bo_ref(NULL, pnvbo); + return -ENOMEM; + } + + nvbo->bo.persistent_swap_storage = nvbo->gem->filp; + nvbo->gem->driver_private = nvbo; + return 0; +} + +static int +nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, + struct drm_nouveau_gem_info *rep) +{ + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_vma *vma; + + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + rep->domain = NOUVEAU_GEM_DOMAIN_GART; + else + rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; + + rep->offset = nvbo->bo.offset; + if (cli->base.vm) { + vma = nouveau_bo_vma_find(nvbo, cli->base.vm); + if (!vma) + return -EINVAL; + + rep->offset = vma->offset; + } + + rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; + rep->map_handle = nvbo->bo.addr_space_offset; + rep->tile_mode = nvbo->tile_mode; + rep->tile_flags = nvbo->tile_flags; + return 0; +} + +int +nouveau_gem_ioctl_new(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct drm_nouveau_gem_new *req = data; + struct nouveau_bo *nvbo = NULL; + int ret = 0; + + drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping; + + if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { + NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags); + return -EINVAL; + } + + ret = nouveau_gem_new(dev, req->info.size, req->align, + req->info.domain, req->info.tile_mode, + req->info.tile_flags, &nvbo); + if (ret) + return ret; + + ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); + if (ret == 0) { + ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info); + if (ret) + drm_gem_handle_delete(file_priv, req->info.handle); + } + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(nvbo->gem); + return ret; +} + +static int +nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, + uint32_t write_domains, uint32_t valid_domains) +{ + struct nouveau_bo *nvbo = gem->driver_private; + struct ttm_buffer_object *bo = &nvbo->bo; + uint32_t domains = valid_domains & nvbo->valid_domains & + (write_domains ? 
write_domains : read_domains); + uint32_t pref_flags = 0, valid_flags = 0; + + if (!domains) + return -EINVAL; + + if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) + valid_flags |= TTM_PL_FLAG_VRAM; + + if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) + valid_flags |= TTM_PL_FLAG_TT; + + if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && + bo->mem.mem_type == TTM_PL_VRAM) + pref_flags |= TTM_PL_FLAG_VRAM; + + else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && + bo->mem.mem_type == TTM_PL_TT) + pref_flags |= TTM_PL_FLAG_TT; + + else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) + pref_flags |= TTM_PL_FLAG_VRAM; + + else + pref_flags |= TTM_PL_FLAG_TT; + + nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); + + return 0; +} + +struct validate_op { + struct list_head vram_list; + struct list_head gart_list; + struct list_head both_list; +}; + +static void +validate_fini_list(struct list_head *list, struct nouveau_fence *fence) +{ + struct list_head *entry, *tmp; + struct nouveau_bo *nvbo; + + list_for_each_safe(entry, tmp, list) { + nvbo = list_entry(entry, struct nouveau_bo, entry); + + nouveau_bo_fence(nvbo, fence); + + if (unlikely(nvbo->validate_mapped)) { + ttm_bo_kunmap(&nvbo->kmap); + nvbo->validate_mapped = false; + } + + list_del(&nvbo->entry); + nvbo->reserved_by = NULL; + ttm_bo_unreserve(&nvbo->bo); + drm_gem_object_unreference_unlocked(nvbo->gem); + } +} + +static void +validate_fini(struct validate_op *op, struct nouveau_fence* fence) +{ + validate_fini_list(&op->vram_list, fence); + validate_fini_list(&op->gart_list, fence); + validate_fini_list(&op->both_list, fence); +} + +static int +validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, + struct drm_nouveau_gem_pushbuf_bo *pbbo, + int nr_buffers, struct validate_op *op) +{ + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct drm_device *dev = chan->drm->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + uint32_t sequence; + int trycnt = 0; + int ret, i; + struct nouveau_bo *res_bo = NULL; + + sequence = atomic_add_return(1, &drm->ttm.validate_sequence); +retry: + if (++trycnt > 100000) { + NV_ERROR(cli, "%s failed and gave up.\n", __func__); + return -EINVAL; + } + + for (i = 0; i < nr_buffers; i++) { + struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i]; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + + gem = drm_gem_object_lookup(dev, file_priv, b->handle); + if (!gem) { + NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle); + validate_fini(op, NULL); + return -ENOENT; + } + nvbo = gem->driver_private; + if (nvbo == res_bo) { + res_bo = NULL; + drm_gem_object_unreference_unlocked(gem); + continue; + } + + if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { + NV_ERROR(cli, "multiple instances of buffer %d on " + "validation list\n", b->handle); + drm_gem_object_unreference_unlocked(gem); + validate_fini(op, NULL); + return -EINVAL; + } + + ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); + if (ret) { + validate_fini(op, NULL); + if (unlikely(ret == -EAGAIN)) { + sequence = atomic_add_return(1, &drm->ttm.validate_sequence); + ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, + sequence); + if (!ret) + res_bo = nvbo; + } + if (unlikely(ret)) { + drm_gem_object_unreference_unlocked(gem); + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "fail reserve\n"); + return ret; + } + } + + b->user_priv = (uint64_t)(unsigned long)nvbo; + nvbo->reserved_by = file_priv; + nvbo->pbbo_index = i; + if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && + (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)) + 
list_add_tail(&nvbo->entry, &op->both_list); + else + if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) + list_add_tail(&nvbo->entry, &op->vram_list); + else + if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) + list_add_tail(&nvbo->entry, &op->gart_list); + else { + NV_ERROR(cli, "invalid valid domains: 0x%08x\n", + b->valid_domains); + list_add_tail(&nvbo->entry, &op->both_list); + validate_fini(op, NULL); + return -EINVAL; + } + if (nvbo == res_bo) + goto retry; + } + + return 0; +} + +static int +validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) +{ + struct nouveau_fence *fence = NULL; + int ret = 0; + + spin_lock(&nvbo->bo.bdev->fence_lock); + if (nvbo->bo.sync_obj) + fence = nouveau_fence_ref(nvbo->bo.sync_obj); + spin_unlock(&nvbo->bo.bdev->fence_lock); + + if (fence) { + ret = nouveau_fence_sync(fence, chan); + nouveau_fence_unref(&fence); + } + + return ret; +} + +static int +validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, + struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, + uint64_t user_pbbo_ptr) +{ + struct nouveau_drm *drm = chan->drm; + struct drm_nouveau_gem_pushbuf_bo __user *upbbo = + (void __force __user *)(uintptr_t)user_pbbo_ptr; + struct nouveau_bo *nvbo; + int ret, relocs = 0; + + list_for_each_entry(nvbo, list, entry) { + struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; + + ret = validate_sync(chan, nvbo); + if (unlikely(ret)) { + NV_ERROR(cli, "fail pre-validate sync\n"); + return ret; + } + + ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, + b->write_domains, + b->valid_domains); + if (unlikely(ret)) { + NV_ERROR(cli, "fail set_domain\n"); + return ret; + } + + ret = nouveau_bo_validate(nvbo, true, false); + if (unlikely(ret)) { + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "fail ttm_validate\n"); + return ret; + } + + ret = validate_sync(chan, nvbo); + if (unlikely(ret)) { + NV_ERROR(cli, "fail post-validate sync\n"); + return ret; + } + + if (nv_device(drm->device)->card_type < NV_50) { + if (nvbo->bo.offset == b->presumed.offset && + ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && + b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || + (nvbo->bo.mem.mem_type == TTM_PL_TT && + b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) + continue; + + if (nvbo->bo.mem.mem_type == TTM_PL_TT) + b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; + else + b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; + b->presumed.offset = nvbo->bo.offset; + b->presumed.valid = 0; + relocs++; + + if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, + &b->presumed, sizeof(b->presumed))) + return -EFAULT; + } + } + + return relocs; +} + +static int +nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, + struct drm_file *file_priv, + struct drm_nouveau_gem_pushbuf_bo *pbbo, + uint64_t user_buffers, int nr_buffers, + struct validate_op *op, int *apply_relocs) +{ + struct nouveau_cli *cli = nouveau_cli(file_priv); + int ret, relocs = 0; + + INIT_LIST_HEAD(&op->vram_list); + INIT_LIST_HEAD(&op->gart_list); + INIT_LIST_HEAD(&op->both_list); + + if (nr_buffers == 0) + return 0; + + ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); + if (unlikely(ret)) { + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "validate_init\n"); + return ret; + } + + ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "validate vram_list\n"); + validate_fini(op, NULL); + return ret; + } + relocs += ret; + + ret = validate_list(chan, cli, &op->gart_list, pbbo, 
user_buffers); + if (unlikely(ret < 0)) { + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "validate gart_list\n"); + validate_fini(op, NULL); + return ret; + } + relocs += ret; + + ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "validate both_list\n"); + validate_fini(op, NULL); + return ret; + } + relocs += ret; + + *apply_relocs = relocs; + return 0; +} + +static inline void * +u_memcpya(uint64_t user, unsigned nmemb, unsigned size) +{ + void *mem; + void __user *userptr = (void __force __user *)(uintptr_t)user; + + mem = kmalloc(nmemb * size, GFP_KERNEL); + if (!mem) + return ERR_PTR(-ENOMEM); + + if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) { + kfree(mem); + return ERR_PTR(-EFAULT); + } + + return mem; +} + +static int +nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, + struct drm_nouveau_gem_pushbuf *req, + struct drm_nouveau_gem_pushbuf_bo *bo) +{ + struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; + int ret = 0; + unsigned i; + + reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); + if (IS_ERR(reloc)) + return PTR_ERR(reloc); + + for (i = 0; i < req->nr_relocs; i++) { + struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; + struct drm_nouveau_gem_pushbuf_bo *b; + struct nouveau_bo *nvbo; + uint32_t data; + + if (unlikely(r->bo_index > req->nr_buffers)) { + NV_ERROR(cli, "reloc bo index invalid\n"); + ret = -EINVAL; + break; + } + + b = &bo[r->bo_index]; + if (b->presumed.valid) + continue; + + if (unlikely(r->reloc_bo_index > req->nr_buffers)) { + NV_ERROR(cli, "reloc container bo index invalid\n"); + ret = -EINVAL; + break; + } + nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; + + if (unlikely(r->reloc_bo_offset + 4 > + nvbo->bo.mem.num_pages << PAGE_SHIFT)) { + NV_ERROR(cli, "reloc outside of bo\n"); + ret = -EINVAL; + break; + } + + if (!nvbo->kmap.virtual) { + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, + &nvbo->kmap); + if (ret) { + NV_ERROR(cli, "failed kmap for reloc\n"); + break; + } + nvbo->validate_mapped = true; + } + + if (r->flags & NOUVEAU_GEM_RELOC_LOW) + data = b->presumed.offset + r->data; + else + if (r->flags & NOUVEAU_GEM_RELOC_HIGH) + data = (b->presumed.offset + r->data) >> 32; + else + data = r->data; + + if (r->flags & NOUVEAU_GEM_RELOC_OR) { + if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) + data |= r->tor; + else + data |= r->vor; + } + + spin_lock(&nvbo->bo.bdev->fence_lock); + ret = ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.bdev->fence_lock); + if (ret) { + NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret); + break; + } + + nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); + } + + kfree(reloc); + return ret; +} + +int +nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct nouveau_abi16_chan *temp; + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_nouveau_gem_pushbuf *req = data; + struct drm_nouveau_gem_pushbuf_push *push; + struct drm_nouveau_gem_pushbuf_bo *bo; + struct nouveau_channel *chan = NULL; + struct validate_op op; + struct nouveau_fence *fence = NULL; + int i, j, ret = 0, do_reloc = 0; + + if (unlikely(!abi16)) + return -ENOMEM; + + list_for_each_entry(temp, &abi16->channels, head) { + if (temp->chan->handle == (NVDRM_CHAN | req->channel)) { + chan = temp->chan; + break; + } + } + + if (!chan) 
+ return nouveau_abi16_put(abi16, -ENOENT); + + req->vram_available = drm->gem.vram_available; + req->gart_available = drm->gem.gart_available; + if (unlikely(req->nr_push == 0)) + goto out_next; + + if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { + NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n", + req->nr_push, NOUVEAU_GEM_MAX_PUSH); + return nouveau_abi16_put(abi16, -EINVAL); + } + + if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { + NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n", + req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); + return nouveau_abi16_put(abi16, -EINVAL); + } + + if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { + NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n", + req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); + return nouveau_abi16_put(abi16, -EINVAL); + } + + push = u_memcpya(req->push, req->nr_push, sizeof(*push)); + if (IS_ERR(push)) + return nouveau_abi16_put(abi16, PTR_ERR(push)); + + bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); + if (IS_ERR(bo)) { + kfree(push); + return nouveau_abi16_put(abi16, PTR_ERR(bo)); + } + + /* Ensure all push buffers are on validate list */ + for (i = 0; i < req->nr_push; i++) { + if (push[i].bo_index >= req->nr_buffers) { + NV_ERROR(cli, "push %d buffer not in list\n", i); + ret = -EINVAL; + goto out_prevalid; + } + } + + /* Validate buffer list */ + ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, + req->nr_buffers, &op, &do_reloc); + if (ret) { + if (ret != -ERESTARTSYS) + NV_ERROR(cli, "validate: %d\n", ret); + goto out_prevalid; + } + + /* Apply any relocations that are required */ + if (do_reloc) { + ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); + if (ret) { + NV_ERROR(cli, "reloc apply: %d\n", ret); + goto out; + } + } + + if (chan->dma.ib_max) { + ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); + if (ret) { + NV_ERROR(cli, "nv50cal_space: %d\n", ret); + goto out; + } + + for (i = 0; i < req->nr_push; i++) { + struct nouveau_bo *nvbo = (void *)(unsigned long) + bo[push[i].bo_index].user_priv; + + nv50_dma_push(chan, nvbo, push[i].offset, + push[i].length); + } + } else + if (nv_device(drm->device)->chipset >= 0x25) { + ret = RING_SPACE(chan, req->nr_push * 2); + if (ret) { + NV_ERROR(cli, "cal_space: %d\n", ret); + goto out; + } + + for (i = 0; i < req->nr_push; i++) { + struct nouveau_bo *nvbo = (void *)(unsigned long) + bo[push[i].bo_index].user_priv; + + OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2); + OUT_RING(chan, 0); + } + } else { + ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); + if (ret) { + NV_ERROR(cli, "jmp_space: %d\n", ret); + goto out; + } + + for (i = 0; i < req->nr_push; i++) { + struct nouveau_bo *nvbo = (void *)(unsigned long) + bo[push[i].bo_index].user_priv; + uint32_t cmd; + + cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2); + cmd |= 0x20000000; + if (unlikely(cmd != req->suffix0)) { + if (!nvbo->kmap.virtual) { + ret = ttm_bo_kmap(&nvbo->bo, 0, + nvbo->bo.mem. 
+ num_pages, + &nvbo->kmap); + if (ret) { + WIND_RING(chan); + goto out; + } + nvbo->validate_mapped = true; + } + + nouveau_bo_wr32(nvbo, (push[i].offset + + push[i].length - 8) / 4, cmd); + } + + OUT_RING(chan, 0x20000000 | + (nvbo->bo.offset + push[i].offset)); + OUT_RING(chan, 0); + for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) + OUT_RING(chan, 0); + } + } + + ret = nouveau_fence_new(chan, false, &fence); + if (ret) { + NV_ERROR(cli, "error fencing pushbuf: %d\n", ret); + WIND_RING(chan); + goto out; + } + +out: + validate_fini(&op, fence); + nouveau_fence_unref(&fence); + +out_prevalid: + kfree(bo); + kfree(push); + +out_next: + if (chan->dma.ib_max) { + req->suffix0 = 0x00000000; + req->suffix1 = 0x00000000; + } else + if (nv_device(drm->device)->chipset >= 0x25) { + req->suffix0 = 0x00020000; + req->suffix1 = 0x00000000; + } else { + req->suffix0 = 0x20000000 | + (chan->push.vma.offset + ((chan->dma.cur + 2) << 2)); + req->suffix1 = 0x00000000; + } + + return nouveau_abi16_put(abi16, ret); +} + +static inline uint32_t +domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) +{ + uint32_t flags = 0; + + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) + flags |= TTM_PL_FLAG_VRAM; + if (domain & NOUVEAU_GEM_DOMAIN_GART) + flags |= TTM_PL_FLAG_TT; + + return flags; +} + +int +nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_cpu_prep *req = data; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); + int ret = -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return -ENOENT; + nvbo = nouveau_gem_object(gem); + + spin_lock(&nvbo->bo.bdev->fence_lock); + ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); + spin_unlock(&nvbo->bo.bdev->fence_lock); + drm_gem_object_unreference_unlocked(gem); + return ret; +} + +int +nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return 0; +} + +int +nouveau_gem_ioctl_info(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_nouveau_gem_info *req = data; + struct drm_gem_object *gem; + int ret; + + gem = drm_gem_object_lookup(dev, file_priv, req->handle); + if (!gem) + return -ENOENT; + + ret = nouveau_gem_info(file_priv, gem, req); + drm_gem_object_unreference_unlocked(gem); + return ret; +} + diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h new file mode 100644 index 0000000..8d7a3f0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h @@ -0,0 +1,45 @@ +#ifndef __NOUVEAU_GEM_H__ +#define __NOUVEAU_GEM_H__ + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_bo.h" + +#define nouveau_bo_tile_layout(nvbo) \ + ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) + +static inline struct nouveau_bo * +nouveau_gem_object(struct drm_gem_object *gem) +{ + return gem ? 
gem->driver_private : NULL; +} + +/* nouveau_gem.c */ +extern int nouveau_gem_new(struct drm_device *, int size, int align, + uint32_t domain, uint32_t tile_mode, + uint32_t tile_flags, struct nouveau_bo **); +extern int nouveau_gem_object_new(struct drm_gem_object *); +extern void nouveau_gem_object_del(struct drm_gem_object *); +extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *); +extern void nouveau_gem_object_close(struct drm_gem_object *, + struct drm_file *); +extern int nouveau_gem_ioctl_new(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, + struct drm_file *); +extern int nouveau_gem_ioctl_info(struct drm_device *, void *, + struct drm_file *); + +extern int nouveau_gem_prime_pin(struct drm_gem_object *); +extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); +extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( + struct drm_device *, size_t size, struct sg_table *); +extern void *nouveau_gem_prime_vmap(struct drm_gem_object *); +extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h new file mode 100644 index 0000000..6976875 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_hwsq.h @@ -0,0 +1,115 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_HWSQ_H__ +#define __NOUVEAU_HWSQ_H__ + +struct hwsq_ucode { + u8 data[0x200]; + union { + u8 *u08; + u16 *u16; + u32 *u32; + } ptr; + u16 len; + + u32 reg; + u32 val; +}; + +static inline void +hwsq_init(struct hwsq_ucode *hwsq) +{ + hwsq->ptr.u08 = hwsq->data; + hwsq->reg = 0xffffffff; + hwsq->val = 0xffffffff; +} + +static inline void +hwsq_fini(struct hwsq_ucode *hwsq) +{ + do { + *hwsq->ptr.u08++ = 0x7f; + hwsq->len = hwsq->ptr.u08 - hwsq->data; + } while (hwsq->len & 3); + hwsq->ptr.u08 = hwsq->data; +} + +static inline void +hwsq_usec(struct hwsq_ucode *hwsq, u8 usec) +{ + u32 shift = 0; + while (usec & ~3) { + usec >>= 2; + shift++; + } + + *hwsq->ptr.u08++ = (shift << 2) | usec; +} + +static inline void +hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val) +{ + flag += 0x80; + if (val >= 0) + flag += 0x20; + if (val >= 1) + flag += 0x20; + *hwsq->ptr.u08++ = flag; +} + +static inline void +hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1) +{ + *hwsq->ptr.u08++ = 0x5f; + *hwsq->ptr.u08++ = v0; + *hwsq->ptr.u08++ = v1; +} + +static inline void +hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val) +{ + if (val != hwsq->val) { + if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) { + *hwsq->ptr.u08++ = 0x42; + *hwsq->ptr.u16++ = (val & 0x0000ffff); + } else { + *hwsq->ptr.u08++ = 0xe2; + *hwsq->ptr.u32++ = val; + } + + hwsq->val = val; + } + + if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) { + *hwsq->ptr.u08++ = 0x40; + *hwsq->ptr.u16++ = (reg & 0x0000ffff); + } else { + *hwsq->ptr.u08++ = 0xe0; + *hwsq->ptr.u32++ = reg; + } + hwsq->reg = reg; +} + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c new file mode 100644 index 0000000..08214bc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c @@ -0,0 +1,69 @@ +/** + * \file mga_ioc32.c + * + * 32-bit ioctl compatibility routines for the MGA DRM. + * + * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich + * + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Egbert Eich 2003,2004 + * Copyright (C) Dave Airlie 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/compat.h> + +#include <drm/drmP.h> + +#include "nouveau_ioctl.h" + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. 
+ * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +#if 0 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) + fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; +#endif + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp, cmd, arg); + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h new file mode 100644 index 0000000..ef2b290 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h @@ -0,0 +1,6 @@ +#ifndef __NOUVEAU_IOCTL_H__ +#define __NOUVEAU_IOCTL_H__ + +long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c new file mode 100644 index 0000000..7e0ff10 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -0,0 +1,647 @@ +/* + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + * Copyright 2005 Stephane Marchesin + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Ben Skeggs <bskeggs@redhat.com> + * Roy Spliet <r.spliet@student.tudelft.nl> + */ + +#include "nouveau_drm.h" +#include "nouveau_pm.h" + +#include <subdev/fb.h> + +static int +nv40_mem_timing_calc(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC); + + /* XXX: I don't trust the -1's and +1's... they must come + * from somewhere! 
*/ + t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 | + 1 << 16 | + (e->tWTR + 2 + (t->tCWL - 1)) << 8 | + (e->tCL + 2 - (t->tCWL - 1)); + + t->reg[2] = 0x20200000 | + ((t->tCWL - 1) << 24 | + e->tRRD << 16 | + e->tRCDWR << 8 | + e->tRCDRD); + + NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id, + t->reg[0], t->reg[1], t->reg[2]); + return 0; +} + +static int +nv50_mem_timing_calc(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nouveau_drm *drm = nouveau_drm(dev); + struct bit_entry P; + uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3; + + if (bit_table(dev, 'P', &P)) + return -EINVAL; + + switch (min(len, (u8) 22)) { + case 22: + unk21 = e->tUNK_21; + case 21: + unk20 = e->tUNK_20; + case 20: + if (e->tCWL > 0) + t->tCWL = e->tCWL; + case 19: + unk18 = e->tUNK_18; + break; + } + + t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC); + + t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 | + max(unk18, (u8) 1) << 16 | + (e->tWTR + 2 + (t->tCWL - 1)) << 8; + + t->reg[2] = ((t->tCWL - 1) << 24 | + e->tRRD << 16 | + e->tRCDWR << 8 | + e->tRCDRD); + + t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13; + + t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP); + + t->reg[8] = boot->reg[8] & 0xffffff00; + + if (P.version == 1) { + t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1)); + + t->reg[3] = (0x14 + e->tCL) << 24 | + 0x16 << 16 | + (e->tCL - 1) << 8 | + (e->tCL - 1); + + t->reg[4] |= boot->reg[4] & 0xffff0000; + + t->reg[6] = (0x33 - t->tCWL) << 16 | + t->tCWL << 8 | + (0x2e + e->tCL - t->tCWL); + + t->reg[7] = 0x4000202 | (e->tCL - 1) << 16; + + /* XXX: P.version == 1 only has DDR2 and GDDR3? */ + if (pfb->ram.type == NV_MEM_TYPE_DDR2) { + t->reg[5] |= (e->tCL + 3) << 8; + t->reg[6] |= (t->tCWL - 2) << 8; + t->reg[8] |= (e->tCL - 4); + } else { + t->reg[5] |= (e->tCL + 2) << 8; + t->reg[6] |= t->tCWL << 8; + t->reg[8] |= (e->tCL - 2); + } + } else { + t->reg[1] |= (5 + e->tCL - (t->tCWL)); + + /* XXX: 0xb? 0x30? */ + t->reg[3] = (0x30 + e->tCL) << 24 | + (boot->reg[3] & 0x00ff0000)| + (0xb + e->tCL) << 8 | + (e->tCL - 1); + + t->reg[4] |= (unk20 << 24 | unk21 << 16); + + /* XXX: +6? 
*/ + t->reg[5] |= (t->tCWL + 6) << 8; + + t->reg[6] = (0x5a + e->tCL) << 16 | + (6 - e->tCL + t->tCWL) << 8 | + (0x50 + e->tCL - t->tCWL); + + tmp7_3 = (boot->reg[7] & 0xff000000) >> 24; + t->reg[7] = (tmp7_3 << 24) | + ((tmp7_3 - 6 + e->tCL) << 16) | + 0x202; + } + + NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id, + t->reg[0], t->reg[1], t->reg[2], t->reg[3]); + NV_DEBUG(drm, " 230: %08x %08x %08x %08x\n", + t->reg[4], t->reg[5], t->reg[6], t->reg[7]); + NV_DEBUG(drm, " 240: %08x\n", t->reg[8]); + return 0; +} + +static int +nvc0_mem_timing_calc(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (e->tCWL > 0) + t->tCWL = e->tCWL; + + t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 | + e->tRFC << 8 | e->tRC); + + t->reg[1] = (boot->reg[1] & 0xff000000) | + (e->tRCDWR & 0x0f) << 20 | + (e->tRCDRD & 0x0f) << 14 | + (t->tCWL << 7) | + (e->tCL & 0x0f); + + t->reg[2] = (boot->reg[2] & 0xff0000ff) | + e->tWR << 16 | e->tWTR << 8; + + t->reg[3] = (e->tUNK_20 & 0x1f) << 9 | + (e->tUNK_21 & 0xf) << 5 | + (e->tUNK_13 & 0x1f); + + t->reg[4] = (boot->reg[4] & 0xfff00fff) | + (e->tRRD&0x1f) << 15; + + NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id, + t->reg[0], t->reg[1], t->reg[2], t->reg[3]); + NV_DEBUG(drm, " 2a0: %08x\n", t->reg[4]); + return 0; +} + +/** + * MR generation methods + */ + +static int +nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + t->drive_strength = 0; + if (len < 15) { + t->odt = boot->odt; + } else { + t->odt = e->RAM_FT1 & 0x07; + } + + if (e->tCL >= NV_MEM_CL_DDR2_MAX) { + NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL); + return -ERANGE; + } + + if (e->tWR >= NV_MEM_WR_DDR2_MAX) { + NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR); + return -ERANGE; + } + + if (t->odt > 3) { + NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x", + t->id, t->odt); + t->odt = 0; + } + + t->mr[0] = (boot->mr[0] & 0x100f) | + (e->tCL) << 4 | + (e->tWR - 1) << 9; + t->mr[1] = (boot->mr[1] & 0x101fbb) | + (t->odt & 0x1) << 2 | + (t->odt & 0x2) << 5; + + NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]); + return 0; +} + +static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = { + 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0}; + +static int +nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + u8 cl = e->tCL - 4; + + t->drive_strength = 0; + if (len < 15) { + t->odt = boot->odt; + } else { + t->odt = e->RAM_FT1 & 0x07; + } + + if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) { + NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL); + return -ERANGE; + } + + if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) { + NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR); + return -ERANGE; + } + + if (e->tCWL < 5) { + NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL); + return -ERANGE; + } + + t->mr[0] = (boot->mr[0] & 0x180b) | + /* CAS */ + (cl & 0x7) << 4 | + (cl & 0x8) >> 1 | + (nv_mem_wr_lut_ddr3[e->tWR]) << 9; + t->mr[1] = (boot->mr[1] & 0x101dbb) | + (t->odt & 0x1) << 2 | + (t->odt & 0x2) << 5 | + (t->odt & 0x4) << 7; + t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 
5) << 3; + + NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]); + return 0; +} + +static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = { + 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11}; +static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = { + 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3}; + +static int +nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (len < 15) { + t->drive_strength = boot->drive_strength; + t->odt = boot->odt; + } else { + t->drive_strength = (e->RAM_FT1 & 0x30) >> 4; + t->odt = e->RAM_FT1 & 0x07; + } + + if (e->tCL >= NV_MEM_CL_GDDR3_MAX) { + NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL); + return -ERANGE; + } + + if (e->tWR >= NV_MEM_WR_GDDR3_MAX) { + NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR); + return -ERANGE; + } + + if (t->odt > 3) { + NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x", + t->id, t->odt); + t->odt = 0; + } + + t->mr[0] = (boot->mr[0] & 0xe0b) | + /* CAS */ + ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) | + ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2); + t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength | + (t->odt << 2) | + (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4; + t->mr[2] = boot->mr[2]; + + NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id, + t->mr[0], t->mr[1], t->mr[2]); + return 0; +} + +static int +nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq, + struct nouveau_pm_tbl_entry *e, u8 len, + struct nouveau_pm_memtiming *boot, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (len < 15) { + t->drive_strength = boot->drive_strength; + t->odt = boot->odt; + } else { + t->drive_strength = (e->RAM_FT1 & 0x30) >> 4; + t->odt = e->RAM_FT1 & 0x03; + } + + if (e->tCL >= NV_MEM_CL_GDDR5_MAX) { + NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL); + return -ERANGE; + } + + if (e->tWR >= NV_MEM_WR_GDDR5_MAX) { + NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR); + return -ERANGE; + } + + if (t->odt > 3) { + NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x", + t->id, t->odt); + t->odt = 0; + } + + t->mr[0] = (boot->mr[0] & 0x007) | + ((e->tCL - 5) << 3) | + ((e->tWR - 4) << 8); + t->mr[1] = (boot->mr[1] & 0x1007f0) | + t->drive_strength | + (t->odt << 2); + + NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]); + return 0; +} + +int +nouveau_mem_timing_calc(struct drm_device *dev, u32 freq, + struct nouveau_pm_memtiming *t) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_memtiming *boot = &pm->boot.timing; + struct nouveau_pm_tbl_entry *e; + u8 ver, len, *ptr, *ramcfg; + int ret; + + ptr = nouveau_perf_timing(dev, freq, &ver, &len); + if (!ptr || ptr[0] == 0x00) { + *t = *boot; + return 0; + } + e = (struct nouveau_pm_tbl_entry *)ptr; + + t->tCWL = boot->tCWL; + + switch (device->card_type) { + case NV_40: + ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t); + break; + case NV_50: + ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t); + break; + case NV_C0: + case NV_D0: + ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t); + break; + default: + ret = -ENODEV; + break; + } + + switch (pfb->ram.type * !ret) { + case NV_MEM_TYPE_GDDR3: + ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t); + break; + case 
NV_MEM_TYPE_GDDR5: + ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t); + break; + case NV_MEM_TYPE_DDR2: + ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t); + break; + case NV_MEM_TYPE_DDR3: + ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t); + break; + default: + ret = -EINVAL; + break; + } + + ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len); + if (ramcfg) { + int dll_off; + + if (ver == 0x00) + dll_off = !!(ramcfg[3] & 0x04); + else + dll_off = !!(ramcfg[2] & 0x40); + + switch (pfb->ram.type) { + case NV_MEM_TYPE_GDDR3: + t->mr[1] &= ~0x00000040; + t->mr[1] |= 0x00000040 * dll_off; + break; + default: + t->mr[1] &= ~0x00000001; + t->mr[1] |= 0x00000001 * dll_off; + break; + } + } + + return ret; +} + +void +nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_fb *pfb = nouveau_fb(device); + u32 timing_base, timing_regs, mr_base; + int i; + + if (device->card_type >= 0xC0) { + timing_base = 0x10f290; + mr_base = 0x10f300; + } else { + timing_base = 0x100220; + mr_base = 0x1002c0; + } + + t->id = -1; + + switch (device->card_type) { + case NV_50: + timing_regs = 9; + break; + case NV_C0: + case NV_D0: + timing_regs = 5; + break; + case NV_30: + case NV_40: + timing_regs = 3; + break; + default: + timing_regs = 0; + return; + } + for(i = 0; i < timing_regs; i++) + t->reg[i] = nv_rd32(device, timing_base + (0x04 * i)); + + t->tCWL = 0; + if (device->card_type < NV_C0) { + t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1; + } else if (device->card_type <= NV_D0) { + t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7); + } + + t->mr[0] = nv_rd32(device, mr_base); + t->mr[1] = nv_rd32(device, mr_base + 0x04); + t->mr[2] = nv_rd32(device, mr_base + 0x20); + t->mr[3] = nv_rd32(device, mr_base + 0x24); + + t->odt = 0; + t->drive_strength = 0; + + switch (pfb->ram.type) { + case NV_MEM_TYPE_DDR3: + t->odt |= (t->mr[1] & 0x200) >> 7; + case NV_MEM_TYPE_DDR2: + t->odt |= (t->mr[1] & 0x04) >> 2 | + (t->mr[1] & 0x40) >> 5; + break; + case NV_MEM_TYPE_GDDR3: + case NV_MEM_TYPE_GDDR5: + t->drive_strength = t->mr[1] & 0x03; + t->odt = (t->mr[1] & 0x0c) >> 2; + break; + default: + break; + } +} + +int +nouveau_mem_exec(struct nouveau_mem_exec_func *exec, + struct nouveau_pm_level *perflvl) +{ + struct nouveau_drm *drm = nouveau_drm(exec->dev); + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nouveau_pm_memtiming *info = &perflvl->timing; + u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0; + u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] }; + u32 mr1_dlloff; + + switch (pfb->ram.type) { + case NV_MEM_TYPE_DDR2: + tDLLK = 2000; + mr1_dlloff = 0x00000001; + break; + case NV_MEM_TYPE_DDR3: + tDLLK = 12000; + tCKSRE = 2000; + tXS = 1000; + mr1_dlloff = 0x00000001; + break; + case NV_MEM_TYPE_GDDR3: + tDLLK = 40000; + mr1_dlloff = 0x00000040; + break; + default: + NV_ERROR(drm, "cannot reclock unsupported memtype\n"); + return -ENODEV; + } + + /* fetch current MRs */ + switch (pfb->ram.type) { + case NV_MEM_TYPE_GDDR3: + case NV_MEM_TYPE_DDR3: + mr[2] = exec->mrg(exec, 2); + default: + mr[1] = exec->mrg(exec, 1); + mr[0] = exec->mrg(exec, 0); + break; + } + + /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */ + if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) { + exec->precharge(exec); + exec->mrs (exec, 1, mr[1] | mr1_dlloff); + exec->wait(exec, tMRD); + } + + /* 
enter self-refresh mode */ + exec->precharge(exec); + exec->refresh(exec); + exec->refresh(exec); + exec->refresh_auto(exec, false); + exec->refresh_self(exec, true); + exec->wait(exec, tCKSRE); + + /* modify input clock frequency */ + exec->clock_set(exec); + + /* exit self-refresh mode */ + exec->wait(exec, tCKSRX); + exec->precharge(exec); + exec->refresh_self(exec, false); + exec->refresh_auto(exec, true); + exec->wait(exec, tXS); + exec->wait(exec, tXS); + + /* update MRs */ + if (mr[2] != info->mr[2]) { + exec->mrs (exec, 2, info->mr[2]); + exec->wait(exec, tMRD); + } + + if (mr[1] != info->mr[1]) { + /* need to keep DLL off until later, at least on GDDR3 */ + exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff)); + exec->wait(exec, tMRD); + } + + if (mr[0] != info->mr[0]) { + exec->mrs (exec, 0, info->mr[0]); + exec->wait(exec, tMRD); + } + + /* update PFB timing registers */ + exec->timing_set(exec); + + /* DLL (enable + ) reset */ + if (!(info->mr[1] & mr1_dlloff)) { + if (mr[1] & mr1_dlloff) { + exec->mrs (exec, 1, info->mr[1]); + exec->wait(exec, tMRD); + } + exec->mrs (exec, 0, info->mr[0] | 0x00000100); + exec->wait(exec, tMRD); + exec->mrs (exec, 0, info->mr[0] | 0x00000000); + exec->wait(exec, tMRD); + exec->wait(exec, tDLLK); + if (pfb->ram.type == NV_MEM_TYPE_GDDR3) + exec->precharge(exec); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c new file mode 100644 index 0000000..4fe883c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -0,0 +1,416 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "nouveau_pm.h" + +static u8 * +nouveau_perf_table(struct drm_device *dev, u8 *ver) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + struct bit_entry P; + + if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) { + u8 *perf = ROMPTR(dev, P.data[0]); + if (perf) { + *ver = perf[0]; + return perf; + } + } + + if (bios->type == NVBIOS_BMP) { + if (bios->data[bios->offset + 6] >= 0x25) { + u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]); + if (perf) { + *ver = perf[1]; + return perf; + } + } + } + + return NULL; +} + +static u8 * +nouveau_perf_entry(struct drm_device *dev, int idx, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + u8 *perf = nouveau_perf_table(dev, ver); + if (perf) { + if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) { + *hdr = perf[3]; + *cnt = 0; + *len = 0; + return perf + perf[0] + idx * perf[3]; + } else + if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) { + *hdr = perf[3]; + *cnt = perf[4]; + *len = perf[5]; + return perf + perf[1] + idx * (*hdr + (*cnt * *len)); + } else + if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) { + *hdr = perf[2]; + *cnt = perf[4]; + *len = perf[3]; + return perf + perf[1] + idx * (*hdr + (*cnt * *len)); + } + } + return NULL; +} + +u8 * +nouveau_perf_rammap(struct drm_device *dev, u32 freq, + u8 *ver, u8 *hdr, u8 *cnt, u8 *len) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct bit_entry P; + u8 *perf, i = 0; + + if (!bit_table(dev, 'P', &P) && P.version == 2) { + u8 *rammap = ROMPTR(dev, P.data[4]); + if (rammap) { + u8 *ramcfg = rammap + rammap[1]; + + *ver = rammap[0]; + *hdr = rammap[2]; + *cnt = rammap[4]; + *len = rammap[3]; + + freq /= 1000; + for (i = 0; i < rammap[5]; i++) { + if (freq >= ROM16(ramcfg[0]) && + freq <= ROM16(ramcfg[2])) + return ramcfg; + + ramcfg += *hdr + (*cnt * *len); + } + } + + return NULL; + } + + if (nv_device(drm->device)->chipset == 0x49 || + nv_device(drm->device)->chipset == 0x4b) + freq /= 2; + + while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) { + if (*ver >= 0x20 && *ver < 0x25) { + if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000) + break; + } else + if (*ver >= 0x25 && *ver < 0x40) { + if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000) + break; + } + } + + if (perf) { + u8 *ramcfg = perf + *hdr; + *ver = 0x00; + *hdr = 0; + return ramcfg; + } + + return NULL; +} + +u8 * +nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + u8 strap, hdr, cnt; + u8 *rammap; + + strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2; + if (bios->ram_restrict_tbl_ptr) + strap = bios->data[bios->ram_restrict_tbl_ptr + strap]; + + rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len); + if (rammap && strap < cnt) + return rammap + hdr + (strap * *len); + + return NULL; +} + +u8 * +nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + struct bit_entry P; + u8 *perf, *timing = NULL; + u8 i = 0, hdr, cnt; + + if (bios->type == NVBIOS_BMP) { + while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt, + len)) && *ver == 0x15) { + if (freq <= ROM32(perf[5]) * 20) { + *ver = 0x00; + *len = 14; + return perf + 41; + } + } + return NULL; + } + + if 
(!bit_table(dev, 'P', &P)) { + if (P.version == 1) + timing = ROMPTR(dev, P.data[4]); + else + if (P.version == 2) + timing = ROMPTR(dev, P.data[8]); + } + + if (timing && timing[0] == 0x10) { + u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len); + if (ramcfg && ramcfg[1] < timing[2]) { + *ver = timing[0]; + *len = timing[3]; + return timing + timing[1] + (ramcfg[1] * timing[3]); + } + } + + return NULL; +} + +static void +legacy_perf_init(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvbios *bios = &drm->vbios; + struct nouveau_pm *pm = nouveau_pm(dev); + char *perf, *entry, *bmp = &bios->data[bios->offset]; + int headerlen, use_straps; + + if (bmp[5] < 0x5 || bmp[6] < 0x14) { + NV_DEBUG(drm, "BMP version too old for perf\n"); + return; + } + + perf = ROMPTR(dev, bmp[0x73]); + if (!perf) { + NV_DEBUG(drm, "No memclock table pointer found.\n"); + return; + } + + switch (perf[0]) { + case 0x12: + case 0x14: + case 0x18: + use_straps = 0; + headerlen = 1; + break; + case 0x01: + use_straps = perf[1] & 1; + headerlen = (use_straps ? 8 : 2); + break; + default: + NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]); + return; + } + + entry = perf + headerlen; + if (use_straps) + entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; + + sprintf(pm->perflvl[0].name, "performance_level_0"); + pm->perflvl[0].memory = ROM16(entry[0]) * 20; + pm->nr_perflvl = 1; +} + +static void +nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct bit_entry P; + u8 *vmap; + int id; + + id = perflvl->volt_min; + perflvl->volt_min = 0; + + /* boards using voltage table version <0x40 store the voltage + * level directly in the perflvl entry as a multiple of 10mV + */ + if (drm->pm->voltage.version < 0x40) { + perflvl->volt_min = id * 10000; + perflvl->volt_max = perflvl->volt_min; + return; + } + + /* on newer ones, the perflvl stores an index into yet another + * vbios table containing a min/max voltage value for the perflvl + */ + if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) { + NV_DEBUG(drm, "where's our volt map table ptr? 
%d %d\n", + P.version, P.length); + return; + } + + vmap = ROMPTR(dev, P.data[32]); + if (!vmap) { + NV_DEBUG(drm, "volt map table pointer invalid\n"); + return; + } + + if (id < vmap[3]) { + vmap += vmap[1] + (vmap[2] * id); + perflvl->volt_min = ROM32(vmap[0]); + perflvl->volt_max = ROM32(vmap[4]); + } +} + +void +nouveau_perf_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nvbios *bios = &drm->vbios; + u8 *perf, ver, hdr, cnt, len; + int ret, vid, i = -1; + + if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) { + legacy_perf_init(dev); + return; + } + + perf = nouveau_perf_table(dev, &ver); + + while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) { + struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; + + if (perf[0] == 0xff) + continue; + + switch (ver) { + case 0x12: + case 0x13: + case 0x15: + perflvl->fanspeed = perf[55]; + if (hdr > 56) + perflvl->volt_min = perf[56]; + perflvl->core = ROM32(perf[1]) * 10; + perflvl->memory = ROM32(perf[5]) * 20; + break; + case 0x21: + case 0x23: + case 0x24: + perflvl->fanspeed = perf[4]; + perflvl->volt_min = perf[5]; + perflvl->shader = ROM16(perf[6]) * 1000; + perflvl->core = perflvl->shader; + perflvl->core += (signed char)perf[8] * 1000; + if (nv_device(drm->device)->chipset == 0x49 || + nv_device(drm->device)->chipset == 0x4b) + perflvl->memory = ROM16(perf[11]) * 1000; + else + perflvl->memory = ROM16(perf[11]) * 2000; + break; + case 0x25: + perflvl->fanspeed = perf[4]; + perflvl->volt_min = perf[5]; + perflvl->core = ROM16(perf[6]) * 1000; + perflvl->shader = ROM16(perf[10]) * 1000; + perflvl->memory = ROM16(perf[12]) * 1000; + break; + case 0x30: + perflvl->memscript = ROM16(perf[2]); + case 0x35: + perflvl->fanspeed = perf[6]; + perflvl->volt_min = perf[7]; + perflvl->core = ROM16(perf[8]) * 1000; + perflvl->shader = ROM16(perf[10]) * 1000; + perflvl->memory = ROM16(perf[12]) * 1000; + perflvl->vdec = ROM16(perf[16]) * 1000; + perflvl->dom6 = ROM16(perf[20]) * 1000; + break; + case 0x40: +#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000) + perflvl->fanspeed = 0; /*XXX*/ + perflvl->volt_min = perf[2]; + if (nv_device(drm->device)->card_type == NV_50) { + perflvl->core = subent(0); + perflvl->shader = subent(1); + perflvl->memory = subent(2); + perflvl->vdec = subent(3); + perflvl->unka0 = subent(4); + } else { + perflvl->hub06 = subent(0); + perflvl->hub01 = subent(1); + perflvl->copy = subent(2); + perflvl->shader = subent(3); + perflvl->rop = subent(4); + perflvl->memory = subent(5); + perflvl->vdec = subent(6); + perflvl->daemon = subent(10); + perflvl->hub07 = subent(11); + perflvl->core = perflvl->shader / 2; + } + break; + } + + /* make sure vid is valid */ + nouveau_perf_voltage(dev, perflvl); + if (pm->voltage.supported && perflvl->volt_min) { + vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); + if (vid < 0) { + NV_DEBUG(drm, "perflvl %d, bad vid\n", i); + continue; + } + } + + /* get the corresponding memory timings */ + ret = nouveau_mem_timing_calc(dev, perflvl->memory, + &perflvl->timing); + if (ret) { + NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret); + continue; + } + + snprintf(perflvl->name, sizeof(perflvl->name), + "performance_level_%d", i); + perflvl->id = i; + + snprintf(perflvl->profile.name, sizeof(perflvl->profile.name), + "%d", perflvl->id); + perflvl->profile.func = &nouveau_pm_static_profile_func; + list_add_tail(&perflvl->profile.head, &pm->profiles); + + 
+ pm->nr_perflvl++; + } +} + +void +nouveau_perf_fini(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c new file mode 100644 index 0000000..936b442 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -0,0 +1,1174 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifdef CONFIG_ACPI +#include <linux/acpi.h> +#endif +#include <linux/power_supply.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_pm.h" + +#include <subdev/gpio.h> +#include <subdev/timer.h> +#include <subdev/therm.h> + +MODULE_PARM_DESC(perflvl, "Performance level (default: boot)"); +static char *nouveau_perflvl; +module_param_named(perflvl, nouveau_perflvl, charp, 0400); + +MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)"); +static int nouveau_perflvl_wr; +module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); + +static int +nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, + struct nouveau_pm_level *a, struct nouveau_pm_level *b) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret; + + /*XXX: not on all boards, we should control based on temperature + * on recent boards.. or maybe on some other factor we don't + * know about? 
+ */ + if (therm && therm->fan_set && + a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) { + ret = therm->fan_set(therm, perflvl->fanspeed); + if (ret && ret != -ENODEV) { + NV_ERROR(drm, "fanspeed set failed: %d\n", ret); + } + } + + if (pm->voltage.supported && pm->voltage_set) { + if (perflvl->volt_min && b->volt_min > a->volt_min) { + ret = pm->voltage_set(dev, perflvl->volt_min); + if (ret) { + NV_ERROR(drm, "voltage set failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +static int +nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + void *state; + int ret; + + if (perflvl == pm->cur) + return 0; + + ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl); + if (ret) + return ret; + + state = pm->clocks_pre(dev, perflvl); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + goto error; + } + ret = pm->clocks_set(dev, state); + if (ret) + goto error; + + ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); + if (ret) + return ret; + + pm->cur = perflvl; + return 0; + +error: + /* restore the fan speed and voltage before leaving */ + nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur); + return ret; +} + +void +nouveau_pm_trigger(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_timer *ptimer = nouveau_timer(drm->device); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_profile *profile = NULL; + struct nouveau_pm_level *perflvl = NULL; + int ret; + + /* select power profile based on current power source */ + if (power_supply_is_system_supplied()) + profile = pm->profile_ac; + else + profile = pm->profile_dc; + + if (profile != pm->profile) { + pm->profile->func->fini(pm->profile); + pm->profile = profile; + pm->profile->func->init(pm->profile); + } + + /* select performance level based on profile */ + perflvl = profile->func->select(profile); + + /* change perflvl, if necessary */ + if (perflvl != pm->cur) { + u64 time0 = ptimer->read(ptimer); + + NV_INFO(drm, "setting performance level: %d", perflvl->id); + ret = nouveau_pm_perflvl_set(dev, perflvl); + if (ret) + NV_INFO(drm, "> reclocking failed: %d\n\n", ret); + + NV_INFO(drm, "> reclocking took %lluns\n\n", + ptimer->read(ptimer) - time0); + } +} + +static struct nouveau_pm_profile * +profile_find(struct drm_device *dev, const char *string) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_profile *profile; + + list_for_each_entry(profile, &pm->profiles, head) { + if (!strncmp(profile->name, string, sizeof(profile->name))) + return profile; + } + + return NULL; +} + +static int +nouveau_pm_profile_set(struct drm_device *dev, const char *profile) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_profile *ac = NULL, *dc = NULL; + char string[16], *cur = string, *ptr; + + /* safety precaution, for now */ + if (nouveau_perflvl_wr != 7777) + return -EPERM; + + strncpy(string, profile, sizeof(string)); + string[sizeof(string) - 1] = 0; + if ((ptr = strchr(string, '\n'))) + *ptr = '\0'; + + ptr = strsep(&cur, ","); + if (ptr) + ac = profile_find(dev, ptr); + + ptr = strsep(&cur, ","); + if (ptr) + dc = profile_find(dev, ptr); + else + dc = ac; + + if (ac == NULL || dc == NULL) + return -EINVAL; + + pm->profile_ac = ac; + pm->profile_dc = dc; + nouveau_pm_trigger(dev); + return 0; +} + +static void +nouveau_pm_static_dummy(struct nouveau_pm_profile *profile) +{ +} + +static struct nouveau_pm_level * +nouveau_pm_static_select(struct 
nouveau_pm_profile *profile) +{ + return container_of(profile, struct nouveau_pm_level, profile); +} + +const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = { + .destroy = nouveau_pm_static_dummy, + .init = nouveau_pm_static_dummy, + .fini = nouveau_pm_static_dummy, + .select = nouveau_pm_static_select, +}; + +static int +nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret; + + memset(perflvl, 0, sizeof(*perflvl)); + + if (pm->clocks_get) { + ret = pm->clocks_get(dev, perflvl); + if (ret) + return ret; + } + + if (pm->voltage.supported && pm->voltage_get) { + ret = pm->voltage_get(dev); + if (ret > 0) { + perflvl->volt_min = ret; + perflvl->volt_max = ret; + } + } + + if (therm && therm->fan_get) { + ret = therm->fan_get(therm); + if (ret >= 0) + perflvl->fanspeed = ret; + } + + nouveau_mem_timing_read(dev, &perflvl->timing); + return 0; +} + +static void +nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) +{ + char c[16], s[16], v[32], f[16], m[16]; + + c[0] = '\0'; + if (perflvl->core) + snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000); + + s[0] = '\0'; + if (perflvl->shader) + snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); + + m[0] = '\0'; + if (perflvl->memory) + snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000); + + v[0] = '\0'; + if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) { + snprintf(v, sizeof(v), " voltage %dmV-%dmV", + perflvl->volt_min / 1000, perflvl->volt_max / 1000); + } else + if (perflvl->volt_min) { + snprintf(v, sizeof(v), " voltage %dmV", + perflvl->volt_min / 1000); + } + + f[0] = '\0'; + if (perflvl->fanspeed) + snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); + + snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f); +} + +static ssize_t +nouveau_pm_get_perflvl_info(struct device *d, + struct device_attribute *a, char *buf) +{ + struct nouveau_pm_level *perflvl = + container_of(a, struct nouveau_pm_level, dev_attr); + char *ptr = buf; + int len = PAGE_SIZE; + + snprintf(ptr, len, "%d:", perflvl->id); + ptr += strlen(buf); + len -= strlen(buf); + + nouveau_pm_perflvl_info(perflvl, ptr, len); + return strlen(buf); +} + +static ssize_t +nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_level cur; + int len = PAGE_SIZE, ret; + char *ptr = buf; + + snprintf(ptr, len, "profile: %s, %s\nc:", + pm->profile_ac->name, pm->profile_dc->name); + ptr += strlen(buf); + len -= strlen(buf); + + ret = nouveau_pm_perflvl_get(dev, &cur); + if (ret == 0) + nouveau_pm_perflvl_info(&cur, ptr, len); + return strlen(buf); +} + +static ssize_t +nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + int ret; + + ret = nouveau_pm_profile_set(dev, buf); + if (ret) + return ret; + return strlen(buf); +} + +static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, + nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); + +static int +nouveau_sysfs_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_pm *pm = nouveau_pm(dev); + struct device *d = &dev->pdev->dev; + int ret, i; + + ret = 
device_create_file(d, &dev_attr_performance_level); + if (ret) + return ret; + + for (i = 0; i < pm->nr_perflvl; i++) { + struct nouveau_pm_level *perflvl = &pm->perflvl[i]; + + perflvl->dev_attr.attr.name = perflvl->name; + perflvl->dev_attr.attr.mode = S_IRUGO; + perflvl->dev_attr.show = nouveau_pm_get_perflvl_info; + perflvl->dev_attr.store = NULL; + sysfs_attr_init(&perflvl->dev_attr.attr); + + ret = device_create_file(d, &perflvl->dev_attr); + if (ret) { + NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n", + perflvl->id, i); + perflvl->dev_attr.attr.name = NULL; + nouveau_pm_fini(dev); + return ret; + } + } + + return 0; +} + +static void +nouveau_sysfs_fini(struct drm_device *dev) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + struct device *d = &dev->pdev->dev; + int i; + + device_remove_file(d, &dev_attr_performance_level); + for (i = 0; i < pm->nr_perflvl; i++) { + struct nouveau_pm_level *pl = &pm->perflvl[i]; + + if (!pl->dev_attr.attr.name) + break; + + device_remove_file(d, &pl->dev_attr); + } +} + +#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) +static ssize_t +nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int temp = therm->temp_get(therm); + + if (temp < 0) + return temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000); +} +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, + NULL, 0); + +static ssize_t +nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d, + struct device_attribute *a, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", 100); +} +static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO, + nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0); + +static ssize_t +nouveau_hwmon_temp1_auto_point1_temp(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000); +} +static ssize_t +nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d, + struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST, + value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR, + nouveau_hwmon_temp1_auto_point1_temp, + nouveau_hwmon_set_temp1_auto_point1_temp, 0); + +static ssize_t +nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); +} +static ssize_t +nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d, + struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = 
nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST, + value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR, + nouveau_hwmon_temp1_auto_point1_temp_hyst, + nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0); + +static ssize_t +nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000); +} +static ssize_t +nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp, + nouveau_hwmon_set_max_temp, + 0); + +static ssize_t +nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); +} +static ssize_t +nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST, + value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, + nouveau_hwmon_max_temp_hyst, + nouveau_hwmon_set_max_temp_hyst, 0); + +static ssize_t +nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000); +} +static ssize_t +nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, + const char *buf, + size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, + nouveau_hwmon_critical_temp, + nouveau_hwmon_set_critical_temp, + 0); + +static ssize_t +nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, 
PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); +} +static ssize_t +nouveau_hwmon_set_critical_temp_hyst(struct device *d, + struct device_attribute *a, + const char *buf, + size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST, + value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR, + nouveau_hwmon_critical_temp_hyst, + nouveau_hwmon_set_critical_temp_hyst, 0); +static ssize_t +nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000); +} +static ssize_t +nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a, + const char *buf, + size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR, + nouveau_hwmon_emergency_temp, + nouveau_hwmon_set_emergency_temp, + 0); + +static ssize_t +nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", + therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); +} +static ssize_t +nouveau_hwmon_set_emergency_temp_hyst(struct device *d, + struct device_attribute *a, + const char *buf, + size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return count; + + therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST, + value / 1000); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR, + nouveau_hwmon_emergency_temp_hyst, + nouveau_hwmon_set_emergency_temp_hyst, + 0); + +static ssize_t nouveau_hwmon_show_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "nouveau\n"); +} +static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0); + +static ssize_t nouveau_hwmon_show_update_rate(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "1000\n"); +} +static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO, + nouveau_hwmon_show_update_rate, + NULL, 0); + +static ssize_t +nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + + return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); +} +static SENSOR_DEVICE_ATTR(fan1_input, 
S_IRUGO, nouveau_hwmon_show_fan1_input, + NULL, 0); + + static ssize_t +nouveau_hwmon_get_pwm1_enable(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret; + + ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t +nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + int ret; + + if (strict_strtol(buf, 10, &value) == -EINVAL) + return -EINVAL; + + ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value); + if (ret) + return ret; + else + return count; +} +static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, + nouveau_hwmon_get_pwm1_enable, + nouveau_hwmon_set_pwm1_enable, 0); + +static ssize_t +nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret; + + ret = therm->fan_get(therm); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t +nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret = -ENODEV; + long value; + + if (nouveau_perflvl_wr != 7777) + return -EPERM; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return -EINVAL; + + ret = therm->fan_set(therm, value); + if (ret) + return ret; + + return count; +} + +static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, + nouveau_hwmon_get_pwm1, + nouveau_hwmon_set_pwm1, 0); + +static ssize_t +nouveau_hwmon_get_pwm1_min(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret; + + ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t +nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + int ret; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return -EINVAL; + + ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value); + if (ret < 0) + return ret; + + return count; +} + +static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR, + nouveau_hwmon_get_pwm1_min, + nouveau_hwmon_set_pwm1_min, 0); + +static ssize_t +nouveau_hwmon_get_pwm1_max(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + int ret; + + ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY); + if (ret < 0) + return ret; + + return sprintf(buf, "%i\n", ret); +} + +static ssize_t 
+nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + long value; + int ret; + + if (kstrtol(buf, 10, &value) == -EINVAL) + return -EINVAL; + + ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value); + if (ret < 0) + return ret; + + return count; +} + +static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR, + nouveau_hwmon_get_pwm1_max, + nouveau_hwmon_set_pwm1_max, 0); + +static struct attribute *hwmon_default_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_update_rate.dev_attr.attr, + NULL +}; +static struct attribute *hwmon_temp_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, + &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, + &sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_emergency.dev_attr.attr, + &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, + NULL +}; +static struct attribute *hwmon_fan_rpm_attributes[] = { + &sensor_dev_attr_fan1_input.dev_attr.attr, + NULL +}; +static struct attribute *hwmon_pwm_fan_attributes[] = { + &sensor_dev_attr_pwm1_enable.dev_attr.attr, + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm1_min.dev_attr.attr, + &sensor_dev_attr_pwm1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group hwmon_default_attrgroup = { + .attrs = hwmon_default_attributes, +}; +static const struct attribute_group hwmon_temp_attrgroup = { + .attrs = hwmon_temp_attributes, +}; +static const struct attribute_group hwmon_fan_rpm_attrgroup = { + .attrs = hwmon_fan_rpm_attributes, +}; +static const struct attribute_group hwmon_pwm_fan_attrgroup = { + .attrs = hwmon_pwm_fan_attributes, +}; +#endif + +static int +nouveau_hwmon_init(struct drm_device *dev) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + +#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_therm *therm = nouveau_therm(drm->device); + struct device *hwmon_dev; + int ret = 0; + + if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set) + return -ENODEV; + + hwmon_dev = hwmon_device_register(&dev->pdev->dev); + if (IS_ERR(hwmon_dev)) { + ret = PTR_ERR(hwmon_dev); + NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret); + return ret; + } + dev_set_drvdata(hwmon_dev, dev); + + /* set the default attributes */ + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup); + if (ret) { + if (ret) + goto error; + } + + /* if the card has a working thermal sensor */ + if (therm->temp_get(therm) >= 0) { + ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); + if (ret) { + if (ret) + goto error; + } + } + + /* if the card has a pwm fan */ + /*XXX: incorrect, need better detection for this, some boards have + * the gpio entries for pwm fan control even when there's no + * actual fan connected to it... therm table? 
*/ + if (therm->fan_get && therm->fan_get(therm) >= 0) { + ret = sysfs_create_group(&hwmon_dev->kobj, + &hwmon_pwm_fan_attrgroup); + if (ret) + goto error; + } + + /* if the card can read the fan rpm */ + if (therm->fan_sense(therm) >= 0) { + ret = sysfs_create_group(&hwmon_dev->kobj, + &hwmon_fan_rpm_attrgroup); + if (ret) + goto error; + } + + pm->hwmon = hwmon_dev; + + return 0; + +error: + NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret); + hwmon_device_unregister(hwmon_dev); + pm->hwmon = NULL; + return ret; +#else + pm->hwmon = NULL; + return 0; +#endif +} + +static void +nouveau_hwmon_fini(struct drm_device *dev) +{ +#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) + struct nouveau_pm *pm = nouveau_pm(dev); + + if (pm->hwmon) { + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); + + hwmon_device_unregister(pm->hwmon); + } +#endif +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) +static int +nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) +{ + struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb); + struct nouveau_drm *drm = nouveau_drm(pm->dev); + struct acpi_bus_event *entry = (struct acpi_bus_event *)data; + + if (strcmp(entry->device_class, "ac_adapter") == 0) { + bool ac = power_supply_is_system_supplied(); + + NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC"); + nouveau_pm_trigger(pm->dev); + } + + return NOTIFY_OK; +} +#endif + +int +nouveau_pm_init(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_pm *pm; + char info[256]; + int ret, i; + + pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL); + if (!pm) + return -ENOMEM; + + pm->dev = dev; + + if (device->card_type < NV_40) { + pm->clocks_get = nv04_pm_clocks_get; + pm->clocks_pre = nv04_pm_clocks_pre; + pm->clocks_set = nv04_pm_clocks_set; + if (nouveau_gpio(drm->device)) { + pm->voltage_get = nouveau_voltage_gpio_get; + pm->voltage_set = nouveau_voltage_gpio_set; + } + } else + if (device->card_type < NV_50) { + pm->clocks_get = nv40_pm_clocks_get; + pm->clocks_pre = nv40_pm_clocks_pre; + pm->clocks_set = nv40_pm_clocks_set; + pm->voltage_get = nouveau_voltage_gpio_get; + pm->voltage_set = nouveau_voltage_gpio_set; + } else + if (device->card_type < NV_C0) { + if (device->chipset < 0xa3 || + device->chipset == 0xaa || + device->chipset == 0xac) { + pm->clocks_get = nv50_pm_clocks_get; + pm->clocks_pre = nv50_pm_clocks_pre; + pm->clocks_set = nv50_pm_clocks_set; + } else { + pm->clocks_get = nva3_pm_clocks_get; + pm->clocks_pre = nva3_pm_clocks_pre; + pm->clocks_set = nva3_pm_clocks_set; + } + pm->voltage_get = nouveau_voltage_gpio_get; + pm->voltage_set = nouveau_voltage_gpio_set; + } else + if (device->card_type < NV_E0) { + pm->clocks_get = nvc0_pm_clocks_get; + pm->clocks_pre = nvc0_pm_clocks_pre; + pm->clocks_set = nvc0_pm_clocks_set; + pm->voltage_get = nouveau_voltage_gpio_get; + pm->voltage_set = nouveau_voltage_gpio_set; + } + + + /* parse aux tables from vbios */ + nouveau_volt_init(dev); + + INIT_LIST_HEAD(&pm->profiles); + + /* determine current ("boot") performance level */ + ret = nouveau_pm_perflvl_get(dev, &pm->boot); + if (ret) { + NV_ERROR(drm, "failed to determine boot perflvl\n"); + return ret; 
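The temp1_* handlers above follow the standard hwmon convention: values cross sysfs in millidegrees Celsius, which is why every store path divides by 1000 before calling therm->attr_set() and every show path multiplies the degree value back up. A minimal userspace sketch of that contract follows; the hwmon0 path is only an assumption, since the actual hwmonN index is assigned at registration time.

/* Illustrative only: read the GPU temperature and raise the downclock
 * threshold exposed by the attributes above.  Assumes the nouveau hwmon
 * device was registered as hwmon0, which is not guaranteed. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long mdeg;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* the show path reported degrees * 1000 */
	printf("core temperature: %ld mdegC (%ld degC)\n", mdeg, mdeg / 1000);

	/* temp1_max maps to NOUVEAU_THERM_ATTR_THRS_DOWN_CLK; the store
	 * path divides by 1000, so write millidegrees here too */
	f = fopen("/sys/class/hwmon/hwmon0/temp1_max", "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", 95 * 1000);
	fclose(f);
	return 0;
}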
+ } + + strncpy(pm->boot.name, "boot", 4); + strncpy(pm->boot.profile.name, "boot", 4); + pm->boot.profile.func = &nouveau_pm_static_profile_func; + + list_add(&pm->boot.profile.head, &pm->profiles); + + pm->profile_ac = &pm->boot.profile; + pm->profile_dc = &pm->boot.profile; + pm->profile = &pm->boot.profile; + pm->cur = &pm->boot; + + /* add performance levels from vbios */ + nouveau_perf_init(dev); + + /* display available performance levels */ + NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl); + for (i = 0; i < pm->nr_perflvl; i++) { + nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); + NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info); + } + + nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); + NV_INFO(drm, "c:%s", info); + + /* switch performance levels now if requested */ + if (nouveau_perflvl != NULL) + nouveau_pm_profile_set(dev, nouveau_perflvl); + + nouveau_sysfs_init(dev); + nouveau_hwmon_init(dev); +#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) + pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; + register_acpi_notifier(&pm->acpi_nb); +#endif + + return 0; +} + +void +nouveau_pm_fini(struct drm_device *dev) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_profile *profile, *tmp; + + list_for_each_entry_safe(profile, tmp, &pm->profiles, head) { + list_del(&profile->head); + profile->func->destroy(profile); + } + + if (pm->cur != &pm->boot) + nouveau_pm_perflvl_set(dev, &pm->boot); + + nouveau_perf_fini(dev); + nouveau_volt_fini(dev); + +#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) + unregister_acpi_notifier(&pm->acpi_nb); +#endif + nouveau_hwmon_fini(dev); + nouveau_sysfs_fini(dev); + + nouveau_drm(dev)->pm = NULL; + kfree(pm); +} + +void +nouveau_pm_resume(struct drm_device *dev) +{ + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_level *perflvl; + + if (!pm->cur || pm->cur == &pm->boot) + return; + + perflvl = pm->cur; + pm->cur = &pm->boot; + nouveau_pm_perflvl_set(dev, perflvl); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h new file mode 100644 index 0000000..73b789c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -0,0 +1,283 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_PM_H__ +#define __NOUVEAU_PM_H__ + +#include <subdev/bios/pll.h> +#include <subdev/clock.h> + +struct nouveau_pm_voltage_level { + u32 voltage; /* microvolts */ + u8 vid; +}; + +struct nouveau_pm_voltage { + bool supported; + u8 version; + u8 vid_mask; + + struct nouveau_pm_voltage_level *level; + int nr_level; +}; + +/* Exclusive upper limits */ +#define NV_MEM_CL_DDR2_MAX 8 +#define NV_MEM_WR_DDR2_MAX 9 +#define NV_MEM_CL_DDR3_MAX 17 +#define NV_MEM_WR_DDR3_MAX 17 +#define NV_MEM_CL_GDDR3_MAX 16 +#define NV_MEM_WR_GDDR3_MAX 18 +#define NV_MEM_CL_GDDR5_MAX 21 +#define NV_MEM_WR_GDDR5_MAX 20 + +struct nouveau_pm_memtiming { + int id; + + u32 reg[9]; + u32 mr[4]; + + u8 tCWL; + + u8 odt; + u8 drive_strength; +}; + +struct nouveau_pm_tbl_header { + u8 version; + u8 header_len; + u8 entry_cnt; + u8 entry_len; +}; + +struct nouveau_pm_tbl_entry { + u8 tWR; + u8 tWTR; + u8 tCL; + u8 tRC; + u8 empty_4; + u8 tRFC; /* Byte 5 */ + u8 empty_6; + u8 tRAS; /* Byte 7 */ + u8 empty_8; + u8 tRP; /* Byte 9 */ + u8 tRCDRD; + u8 tRCDWR; + u8 tRRD; + u8 tUNK_13; + u8 RAM_FT1; /* 14, a bitmask of random RAM features */ + u8 empty_15; + u8 tUNK_16; + u8 empty_17; + u8 tUNK_18; + u8 tCWL; + u8 tUNK_20, tUNK_21; +}; + +struct nouveau_pm_profile; +struct nouveau_pm_profile_func { + void (*destroy)(struct nouveau_pm_profile *); + void (*init)(struct nouveau_pm_profile *); + void (*fini)(struct nouveau_pm_profile *); + struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *); +}; + +struct nouveau_pm_profile { + const struct nouveau_pm_profile_func *func; + struct list_head head; + char name[8]; +}; + +#define NOUVEAU_PM_MAX_LEVEL 8 +struct nouveau_pm_level { + struct nouveau_pm_profile profile; + struct device_attribute dev_attr; + char name[32]; + int id; + + struct nouveau_pm_memtiming timing; + u32 memory; + u16 memscript; + + u32 core; + u32 shader; + u32 rop; + u32 copy; + u32 daemon; + u32 vdec; + u32 dom6; + u32 unka0; /* nva3:nvc0 */ + u32 hub01; /* nvc0- */ + u32 hub06; /* nvc0- */ + u32 hub07; /* nvc0- */ + + u32 volt_min; /* microvolts */ + u32 volt_max; + u8 fanspeed; +}; + +struct nouveau_pm_temp_sensor_constants { + u16 offset_constant; + s16 offset_mult; + s16 offset_div; + s16 slope_mult; + s16 slope_div; +}; + +struct nouveau_pm_threshold_temp { + s16 critical; + s16 down_clock; +}; + +struct nouveau_pm { + struct drm_device *dev; + + struct nouveau_pm_voltage voltage; + struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; + int nr_perflvl; + struct nouveau_pm_temp_sensor_constants sensor_constants; + struct nouveau_pm_threshold_temp threshold_temp; + + struct nouveau_pm_profile *profile_ac; + struct nouveau_pm_profile *profile_dc; + struct nouveau_pm_profile *profile; + struct list_head profiles; + + struct nouveau_pm_level boot; + struct nouveau_pm_level *cur; + + struct device *hwmon; + struct notifier_block acpi_nb; + + int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *); + void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *); + int (*clocks_set)(struct drm_device *, void *); + + int (*voltage_get)(struct drm_device *); + int (*voltage_set)(struct drm_device *, int voltage); +}; + +static inline struct nouveau_pm * +nouveau_pm(struct drm_device *dev) +{ + return nouveau_drm(dev)->pm; +} + +struct nouveau_mem_exec_func { + struct drm_device *dev; + void (*precharge)(struct nouveau_mem_exec_func *); + void (*refresh)(struct nouveau_mem_exec_func *); + void (*refresh_auto)(struct nouveau_mem_exec_func *, 
bool); + void (*refresh_self)(struct nouveau_mem_exec_func *, bool); + void (*wait)(struct nouveau_mem_exec_func *, u32 nsec); + u32 (*mrg)(struct nouveau_mem_exec_func *, int mr); + void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data); + void (*clock_set)(struct nouveau_mem_exec_func *); + void (*timing_set)(struct nouveau_mem_exec_func *); + void *priv; +}; + +/* nouveau_mem.c */ +int nouveau_mem_exec(struct nouveau_mem_exec_func *, + struct nouveau_pm_level *); + +/* nouveau_pm.c */ +int nouveau_pm_init(struct drm_device *dev); +void nouveau_pm_fini(struct drm_device *dev); +void nouveau_pm_resume(struct drm_device *dev); +extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func; +void nouveau_pm_trigger(struct drm_device *dev); + +/* nouveau_volt.c */ +void nouveau_volt_init(struct drm_device *); +void nouveau_volt_fini(struct drm_device *); +int nouveau_volt_vid_lookup(struct drm_device *, int voltage); +int nouveau_volt_lvl_lookup(struct drm_device *, int vid); +int nouveau_voltage_gpio_get(struct drm_device *); +int nouveau_voltage_gpio_set(struct drm_device *, int voltage); + +/* nouveau_perf.c */ +void nouveau_perf_init(struct drm_device *); +void nouveau_perf_fini(struct drm_device *); +u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver, + u8 *hdr, u8 *cnt, u8 *len); +u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len); +u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len); + +/* nouveau_mem.c */ +void nouveau_mem_timing_init(struct drm_device *); +void nouveau_mem_timing_fini(struct drm_device *); + +/* nv04_pm.c */ +int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); +void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); +int nv04_pm_clocks_set(struct drm_device *, void *); + +/* nv40_pm.c */ +int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); +void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); +int nv40_pm_clocks_set(struct drm_device *, void *); +int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *); +int nv40_pm_pwm_set(struct drm_device *, int, u32, u32); + +/* nv50_pm.c */ +int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); +void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); +int nv50_pm_clocks_set(struct drm_device *, void *); +int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *); +int nv50_pm_pwm_set(struct drm_device *, int, u32, u32); + +/* nva3_pm.c */ +int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); +void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); +int nva3_pm_clocks_set(struct drm_device *, void *); + +/* nvc0_pm.c */ +int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *); +void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); +int nvc0_pm_clocks_set(struct drm_device *, void *); + +/* nouveau_mem.c */ +int nouveau_mem_timing_calc(struct drm_device *, u32 freq, + struct nouveau_pm_memtiming *); +void nouveau_mem_timing_read(struct drm_device *, + struct nouveau_pm_memtiming *); + +static inline int +nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq, + int *N, int *fN, int *M, int *P) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_clock *clk = nouveau_clock(device); + struct nouveau_pll_vals pv; + int ret; + + ret = clk->pll_calc(clk, pll, freq, &pv); + *N = pv.N1; + *M = pv.M1; + *P = pv.log2P; + return ret; +} + 
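The clocks_get()/clocks_pre()/clocks_set() members of struct nouveau_pm split a reclock into three chipset-specific steps: read back the current clocks, precompute an opaque state object for a target level, then commit that state. The sketch below only illustrates that calling convention using the declarations above (kernel context assumed for IS_ERR()/PTR_ERR()); it is not the driver's actual perflvl-switching path and skips voltage, fan and memory-timing handling.

/* Sketch only: drive the per-chipset clock hooks for a single level switch. */
static int example_set_level(struct drm_device *dev,
			     struct nouveau_pm_level *target)
{
	struct nouveau_pm *pm = nouveau_pm(dev);
	void *state;

	if (!pm || !pm->clocks_pre || !pm->clocks_set)
		return -ENODEV;

	/* build chipset-specific state for the requested clocks ... */
	state = pm->clocks_pre(dev, target);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* ... then commit it to the hardware */
	return pm->clocks_set(dev, state);
}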
+#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c new file mode 100644 index 0000000..f53e108 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -0,0 +1,95 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + */ + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_gem.h" + +struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(obj); + int npages = nvbo->bo.num_pages; + + return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); +} + +void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(obj); + int ret; + + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, + &nvbo->dma_buf_vmap); + if (ret) + return ERR_PTR(ret); + + return nvbo->dma_buf_vmap.virtual; +} + +void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(obj); + + ttm_bo_kunmap(&nvbo->dma_buf_vmap); +} + +struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, + size_t size, + struct sg_table *sg) +{ + struct nouveau_bo *nvbo; + u32 flags = 0; + int ret; + + flags = TTM_PL_FLAG_TT; + + ret = nouveau_bo_new(dev, size, 0, flags, 0, 0, + sg, &nvbo); + if (ret) + return ERR_PTR(ret); + + nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; + nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); + if (!nvbo->gem) { + nouveau_bo_ref(NULL, &nvbo); + return ERR_PTR(-ENOMEM); + } + + nvbo->gem->driver_private = nvbo; + return nvbo->gem; +} + +int nouveau_gem_prime_pin(struct drm_gem_object *obj) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(obj); + int ret = 0; + + /* pin buffer into GTT */ + ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT); + if (ret) + return -EINVAL; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h new file mode 100644 index 0000000..43a96b9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -0,0 +1,858 @@ + +#define NV04_PFB_BOOT_0 0x00100000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003 +# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004 +# define 
NV04_PFB_BOOT_0_RAM_TYPE 0x00000028 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028 +# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100 +# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000 +#define NV04_PFB_DEBUG_0 0x00100080 +# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001 +# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010 +# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00 +# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000 +# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000 +# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000 +# define NV04_PFB_DEBUG_0_CASOE 0x00100000 +# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000 +# define NV04_PFB_DEBUG_0_REFINC 0x20000000 +# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000 +#define NV04_PFB_CFG0 0x00100200 +# define NV04_PFB_CFG0_SCRAMBLE 0x20000000 +#define NV04_PFB_CFG1 0x00100204 +#define NV04_PFB_FIFO_DATA 0x0010020c +# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 +# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 +#define NV10_PFB_REFCTRL 0x00100210 +# define NV10_PFB_REFCTRL_VALID_1 (1 << 31) +#define NV04_PFB_PAD 0x0010021c +# define NV04_PFB_PAD_CKE_NORMAL (1 << 0) +#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) +#define NV10_PFB_TILE__SIZE 8 +#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) +#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) +#define NV10_PFB_TSTATUS(i) (0x0010024c + (i*16)) +#define NV04_PFB_REF 0x001002d0 +# define NV04_PFB_REF_CMD_REFRESH (1 << 0) +#define NV04_PFB_PRE 0x001002d4 +# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) +#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i)) +# define NV20_PFB_ZCOMP_MODE_32 (4 << 24) +# define NV20_PFB_ZCOMP_EN (1 << 31) +# define NV25_PFB_ZCOMP_MODE_16 (1 << 20) +# define NV25_PFB_ZCOMP_MODE_32 (2 << 20) +#define NV10_PFB_CLOSE_PAGE2 0x0010033c +#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) +#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) +#define NV40_PFB_TILE__SIZE_0 12 +#define NV40_PFB_TILE__SIZE_1 15 +#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) +#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16)) +#define NV40_PFB_TSTATUS(i) (0x0010060c + (i*16)) +#define NV40_PFB_UNK_800 0x00100800 + +#define NV_PEXTDEV_BOOT_0 0x00101000 +#define NV_PEXTDEV_BOOT_0_RAMCFG 0x0000003c +# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12) +#define NV_PEXTDEV_BOOT_3 0x0010100c + +#define NV_RAMIN 0x00700000 + +#define NV_RAMHT_HANDLE_OFFSET 0 +#define NV_RAMHT_CONTEXT_OFFSET 4 +# define NV_RAMHT_CONTEXT_VALID (1<<31) +# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24 +# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16 +# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0 +# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1 +# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0 +# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23 +# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 +# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 + +/* Some object classes we care about in the drm */ +#define NV_CLASS_DMA_FROM_MEMORY 0x00000002 +#define NV_CLASS_DMA_TO_MEMORY 0x00000003 +#define NV_CLASS_NULL 0x00000030 +#define NV_CLASS_DMA_IN_MEMORY 0x0000003D + +#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE)) +#define NV03_USER__SIZE 16 +#define NV10_USER__SIZE 32 +#define NV03_USER_SIZE 0x00010000 +#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE)) 
+#define NV03_USER_DMA_PUT__SIZE 16 +#define NV10_USER_DMA_PUT__SIZE 32 +#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE)) +#define NV03_USER_DMA_GET__SIZE 16 +#define NV10_USER_DMA_GET__SIZE 32 +#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE)) +#define NV03_USER_REF_CNT__SIZE 16 +#define NV10_USER_REF_CNT__SIZE 32 + +#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE)) +#define NV40_USER_SIZE 0x00001000 +#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE)) +#define NV40_USER_DMA_PUT__SIZE 32 +#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE)) +#define NV40_USER_DMA_GET__SIZE 32 +#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE)) +#define NV40_USER_REF_CNT__SIZE 32 + +#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE)) +#define NV50_USER_SIZE 0x00002000 +#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE)) +#define NV50_USER_DMA_PUT__SIZE 128 +#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE)) +#define NV50_USER_DMA_GET__SIZE 128 +#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE)) +#define NV50_USER_REF_CNT__SIZE 128 + +#define NV03_FIFO_SIZE 0x8000UL + +#define NV03_PMC_BOOT_0 0x00000000 +#define NV03_PMC_BOOT_1 0x00000004 +#define NV03_PMC_INTR_0 0x00000100 +# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8) +# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12) +# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21) +# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24) +# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25) +# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26) +# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24) +#define NV03_PMC_INTR_EN_0 0x00000140 +# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0) +#define NV03_PMC_ENABLE 0x00000200 +# define NV_PMC_ENABLE_PFIFO (1<<8) +# define NV_PMC_ENABLE_PGRAPH (1<<12) +/* Disabling the below bit breaks newer (G7X only?) mobile chipsets, + * the card will hang early on in the X init process. + */ +# define NV_PMC_ENABLE_UNK13 (1<<13) +#define NV40_PMC_GRAPH_UNITS 0x00001540 +#define NV40_PMC_BACKLIGHT 0x000015f0 +# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 +#define NV40_PMC_1700 0x00001700 +#define NV40_PMC_1704 0x00001704 +#define NV40_PMC_1708 0x00001708 +#define NV40_PMC_170C 0x0000170C + +/* probably PMC ? 
*/ +#define NV50_PUNK_BAR0_PRAMIN 0x00001700 +#define NV50_PUNK_BAR_CFG_BASE 0x00001704 +#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) +#define NV50_PUNK_BAR1_CTXDMA 0x00001708 +#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) +#define NV50_PUNK_BAR3_CTXDMA 0x0000170C +#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) +#define NV50_PUNK_UNK1710 0x00001710 + +#define NV04_PBUS_PCI_NV_1 0x00001804 +#define NV04_PBUS_PCI_NV_19 0x0000184C +#define NV04_PBUS_PCI_NV_20 0x00001850 +# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0) +# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0) + +#define NV04_PTIMER_INTR_0 0x00009100 +#define NV04_PTIMER_INTR_EN_0 0x00009140 +#define NV04_PTIMER_NUMERATOR 0x00009200 +#define NV04_PTIMER_DENOMINATOR 0x00009210 +#define NV04_PTIMER_TIME_0 0x00009400 +#define NV04_PTIMER_TIME_1 0x00009410 +#define NV04_PTIMER_ALARM_0 0x00009420 + +#define NV04_PGRAPH_DEBUG_0 0x00400080 +#define NV04_PGRAPH_DEBUG_1 0x00400084 +#define NV04_PGRAPH_DEBUG_2 0x00400088 +#define NV04_PGRAPH_DEBUG_3 0x0040008c +#define NV10_PGRAPH_DEBUG_4 0x00400090 +#define NV03_PGRAPH_INTR 0x00400100 +#define NV03_PGRAPH_NSTATUS 0x00400104 +# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) +# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) +# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) +# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) +# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) +# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) +# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) +# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) +#define NV03_PGRAPH_NSOURCE 0x00400108 +# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0) +# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1) +# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2) +# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3) +# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4) +# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5) +# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6) +# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7) +# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8) +# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9) +# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) +# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) +# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) +# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) +# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) +# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) +# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) +# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) +#define NV03_PGRAPH_INTR_EN 0x00400140 +#define NV40_PGRAPH_INTR_EN 0x0040013C +# define NV_PGRAPH_INTR_NOTIFY (1<<0) +# define NV_PGRAPH_INTR_MISSING_HW (1<<4) +# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) +# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) +# define NV_PGRAPH_INTR_ERROR (1<<20) +#define NV10_PGRAPH_CTX_CONTROL 0x00400144 +#define NV10_PGRAPH_CTX_USER 0x00400148 +#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i)) +#define NV04_PGRAPH_CTX_SWITCH1 0x00400160 +#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \ + + 0x4*(i) + 0x20*(j)) +#define NV04_PGRAPH_CTX_SWITCH2 0x00400164 +#define NV04_PGRAPH_CTX_SWITCH3 0x00400168 +#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C +#define NV04_PGRAPH_CTX_CONTROL 0x00400170 +#define NV04_PGRAPH_CTX_USER 0x00400174 +#define NV04_PGRAPH_CTX_CACHE1 0x00400180 +#define NV03_PGRAPH_CTX_CONTROL 0x00400190 +#define NV03_PGRAPH_CTX_USER 0x00400194 +#define NV04_PGRAPH_CTX_CACHE2 0x004001A0 +#define 
NV04_PGRAPH_CTX_CACHE3 0x004001C0 +#define NV04_PGRAPH_CTX_CACHE4 0x004001E0 +#define NV40_PGRAPH_CTXCTL_0304 0x00400304 +#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff +#define NV40_PGRAPH_CTXCTL_0310 0x00400310 +#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 +#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 +#define NV40_PGRAPH_CTXCTL_030C 0x0040030c +#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 +#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 +#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 +#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF +#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330 +#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff +#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c +#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000 +#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff +#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330 +#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff +#define NV03_PGRAPH_ABS_X_RAM 0x00400400 +#define NV03_PGRAPH_ABS_Y_RAM 0x00400480 +#define NV03_PGRAPH_X_MISC 0x00400500 +#define NV03_PGRAPH_Y_MISC 0x00400504 +#define NV04_PGRAPH_VALID1 0x00400508 +#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C +#define NV04_PGRAPH_MISC24_0 0x00400510 +#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514 +#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518 +#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C +#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520 +#define NV03_PGRAPH_CLIPX_0 0x00400524 +#define NV03_PGRAPH_CLIPX_1 0x00400528 +#define NV03_PGRAPH_CLIPY_0 0x0040052C +#define NV03_PGRAPH_CLIPY_1 0x00400530 +#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534 +#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538 +#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C +#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540 +#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544 +#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548 +#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 +#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 +#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 +#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C +#define NV04_PGRAPH_MISC24_1 0x00400570 +#define NV04_PGRAPH_MISC24_2 0x00400574 +#define NV04_PGRAPH_VALID2 0x00400578 +#define NV04_PGRAPH_PASSTHRU_0 0x0040057C +#define NV04_PGRAPH_PASSTHRU_1 0x00400580 +#define NV04_PGRAPH_PASSTHRU_2 0x00400584 +#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588 +#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C +#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590 +#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594 +#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598 +#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C +#define NV04_PGRAPH_FORMAT_0 0x004005A8 +#define NV04_PGRAPH_FORMAT_1 0x004005AC +#define NV04_PGRAPH_FILTER_0 0x004005B0 +#define NV04_PGRAPH_FILTER_1 0x004005B4 +#define NV03_PGRAPH_MONO_COLOR0 0x00400600 +#define NV04_PGRAPH_ROP3 0x00400604 +#define NV04_PGRAPH_BETA_AND 0x00400608 +#define NV04_PGRAPH_BETA_PREMULT 0x0040060C +#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610 +#define NV04_PGRAPH_FORMATS 0x00400618 +#define NV10_PGRAPH_DEBUG_2 0x00400620 +#define NV04_PGRAPH_BOFFSET0 0x00400640 +#define NV04_PGRAPH_BOFFSET1 0x00400644 +#define NV04_PGRAPH_BOFFSET2 0x00400648 +#define NV04_PGRAPH_BOFFSET3 0x0040064C +#define NV04_PGRAPH_BOFFSET4 0x00400650 +#define NV04_PGRAPH_BOFFSET5 0x00400654 +#define NV04_PGRAPH_BBASE0 
0x00400658 +#define NV04_PGRAPH_BBASE1 0x0040065C +#define NV04_PGRAPH_BBASE2 0x00400660 +#define NV04_PGRAPH_BBASE3 0x00400664 +#define NV04_PGRAPH_BBASE4 0x00400668 +#define NV04_PGRAPH_BBASE5 0x0040066C +#define NV04_PGRAPH_BPITCH0 0x00400670 +#define NV04_PGRAPH_BPITCH1 0x00400674 +#define NV04_PGRAPH_BPITCH2 0x00400678 +#define NV04_PGRAPH_BPITCH3 0x0040067C +#define NV04_PGRAPH_BPITCH4 0x00400680 +#define NV04_PGRAPH_BLIMIT0 0x00400684 +#define NV04_PGRAPH_BLIMIT1 0x00400688 +#define NV04_PGRAPH_BLIMIT2 0x0040068C +#define NV04_PGRAPH_BLIMIT3 0x00400690 +#define NV04_PGRAPH_BLIMIT4 0x00400694 +#define NV04_PGRAPH_BLIMIT5 0x00400698 +#define NV04_PGRAPH_BSWIZZLE2 0x0040069C +#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 +#define NV03_PGRAPH_STATUS 0x004006B0 +#define NV04_PGRAPH_STATUS 0x00400700 +# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000 +#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 +#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 +#define NV04_PGRAPH_SURFACE 0x0040070C +#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C +#define NV04_PGRAPH_STATE 0x00400710 +#define NV10_PGRAPH_SURFACE 0x00400710 +#define NV04_PGRAPH_NOTIFY 0x00400714 +#define NV10_PGRAPH_STATE 0x00400714 +#define NV10_PGRAPH_NOTIFY 0x00400718 + +#define NV04_PGRAPH_FIFO 0x00400720 + +#define NV04_PGRAPH_BPIXEL 0x00400724 +#define NV10_PGRAPH_RDI_INDEX 0x00400750 +#define NV04_PGRAPH_FFINTFC_ST2 0x00400754 +#define NV10_PGRAPH_RDI_DATA 0x00400754 +#define NV04_PGRAPH_DMA_PITCH 0x00400760 +#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760 +#define NV04_PGRAPH_DVD_COLORFMT 0x00400764 +#define NV10_PGRAPH_FFINTFC_ST2 0x00400764 +#define NV04_PGRAPH_SCALED_FORMAT 0x00400768 +#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768 +#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c +#define NV10_PGRAPH_DMA_PITCH 0x00400770 +#define NV10_PGRAPH_DVD_COLORFMT 0x00400774 +#define NV10_PGRAPH_SCALED_FORMAT 0x00400778 +#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 +#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 +#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 +#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 +#define NV04_PGRAPH_PATT_COLOR0 0x00400800 +#define NV04_PGRAPH_PATT_COLOR1 0x00400804 +#define NV04_PGRAPH_PATTERN 0x00400808 +#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810 +#define NV04_PGRAPH_CHROMA 0x00400814 +#define NV04_PGRAPH_CONTROL0 0x00400818 +#define NV04_PGRAPH_CONTROL1 0x0040081C +#define NV04_PGRAPH_CONTROL2 0x00400820 +#define NV04_PGRAPH_BLEND 0x00400824 +#define NV04_PGRAPH_STORED_FMT 0x00400830 +#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 +#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16)) +#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) +#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) +#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) +#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) +#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) +#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) +#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) +#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) +#define NV04_PGRAPH_U_RAM 0x00400D00 +#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16)) +#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16)) +#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16)) +#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) +#define NV04_PGRAPH_V_RAM 0x00400D40 +#define NV04_PGRAPH_W_RAM 0x00400D80 +#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 +#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 +#define NV10_PGRAPH_COMBINER0_IN_RGB 
0x00400E48 +#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C +#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 +#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 +#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 +#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C +#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 +#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 +#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 +#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C +#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 +#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 +#define NV10_PGRAPH_XFMODE0 0x00400F40 +#define NV10_PGRAPH_XFMODE1 0x00400F44 +#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48 +#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C +#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50 +#define NV10_PGRAPH_PIPE_DATA 0x00400F54 +#define NV04_PGRAPH_DMA_START_0 0x00401000 +#define NV04_PGRAPH_DMA_START_1 0x00401004 +#define NV04_PGRAPH_DMA_LENGTH 0x00401008 +#define NV04_PGRAPH_DMA_MISC 0x0040100C +#define NV04_PGRAPH_DMA_DATA_0 0x00401020 +#define NV04_PGRAPH_DMA_DATA_1 0x00401024 +#define NV04_PGRAPH_DMA_RM 0x00401030 +#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040 +#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044 +#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048 +#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C +#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050 +#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054 +#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058 +#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C +#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060 +#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080 +#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084 +#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088 +#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C +#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090 +#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094 +#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 +#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C +#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 +#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) +#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) +#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) +#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) + + +/* It's a guess that this works on NV03. 
Confirmed on NV04, though */ +#define NV04_PFIFO_DELAY_0 0x00002040 +#define NV04_PFIFO_DMA_TIMESLICE 0x00002044 +#define NV04_PFIFO_NEXT_CHANNEL 0x00002050 +#define NV03_PFIFO_INTR_0 0x00002100 +#define NV03_PFIFO_INTR_EN_0 0x00002140 +# define NV_PFIFO_INTR_CACHE_ERROR (1<<0) +# define NV_PFIFO_INTR_RUNOUT (1<<4) +# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8) +# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) +# define NV_PFIFO_INTR_DMA_PT (1<<16) +# define NV_PFIFO_INTR_SEMAPHORE (1<<20) +# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) +#define NV03_PFIFO_RAMHT 0x00002210 +#define NV03_PFIFO_RAMFC 0x00002214 +#define NV03_PFIFO_RAMRO 0x00002218 +#define NV40_PFIFO_RAMFC 0x00002220 +#define NV03_PFIFO_CACHES 0x00002500 +#define NV04_PFIFO_MODE 0x00002504 +#define NV04_PFIFO_DMA 0x00002508 +#define NV04_PFIFO_SIZE 0x0000250c +#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) +#define NV50_PFIFO_CTX_TABLE__SIZE 128 +#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) +#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF +#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF +#define NV03_PFIFO_CACHE0_PUSH0 0x00003000 +#define NV03_PFIFO_CACHE0_PULL0 0x00003040 +#define NV04_PFIFO_CACHE0_PULL0 0x00003050 +#define NV04_PFIFO_CACHE0_PULL1 0x00003054 +#define NV03_PFIFO_CACHE1_PUSH0 0x00003200 +#define NV03_PFIFO_CACHE1_PUSH1 0x00003204 +#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8) +#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16) +#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f +#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f +#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f +#define NV03_PFIFO_CACHE1_PUT 0x00003210 +#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220 +#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8 +# define 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0 +# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000 +# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000 +# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000 +# define NV_PFIFO_CACHE1_ENDIAN 0x80000000 +# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF +# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000 +#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228 +#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c +#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230 +#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240 +#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244 +#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248 +#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C +#define NV03_PFIFO_CACHE1_PULL0 0x00003240 +#define NV04_PFIFO_CACHE1_PULL0 0x00003250 +# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010 +# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000 +#define NV03_PFIFO_CACHE1_PULL1 0x00003250 +#define NV04_PFIFO_CACHE1_PULL1 0x00003254 +#define NV04_PFIFO_CACHE1_HASH 0x00003258 +#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260 +#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264 +#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268 +#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C +#define NV03_PFIFO_CACHE1_GET 0x00003270 +#define NV04_PFIFO_CACHE1_ENGINE 0x00003280 +#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 +#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 +#define NV40_PFIFO_UNK32E4 0x000032E4 +#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) +#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8)) +#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8)) +#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8)) + +#define NV_CRTC0_INTSTAT 0x00600100 +#define NV_CRTC0_INTEN 0x00600140 +#define NV_CRTC1_INTSTAT 0x00602100 +#define NV_CRTC1_INTEN 0x00602140 +# define NV_CRTC_INTR_VBLANK (1<<0) + +#define NV04_PRAMIN 0x00700000 + +/* Fifo commands. 
These are not regs, neither masks */ +#define NV03_FIFO_CMD_JUMP 0x20000000 +#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc +#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) + +/* This is a partial import from rules-ng, a few things may be duplicated. + * Eventually we should completely import everything from rules-ng. + * For the moment check rules-ng for docs. + */ + +#define NV50_PMC 0x00000000 +#define NV50_PMC__LEN 0x1 +#define NV50_PMC__ESIZE 0x2000 +# define NV50_PMC_BOOT_0 0x00000000 +# define NV50_PMC_BOOT_0_REVISION 0x000000ff +# define NV50_PMC_BOOT_0_REVISION__SHIFT 0 +# define NV50_PMC_BOOT_0_ARCH 0x0ff00000 +# define NV50_PMC_BOOT_0_ARCH__SHIFT 20 +# define NV50_PMC_INTR_0 0x00000100 +# define NV50_PMC_INTR_0_PFIFO (1<<8) +# define NV50_PMC_INTR_0_PGRAPH (1<<12) +# define NV50_PMC_INTR_0_PTIMER (1<<20) +# define NV50_PMC_INTR_0_HOTPLUG (1<<21) +# define NV50_PMC_INTR_0_DISPLAY (1<<26) +# define NV50_PMC_INTR_EN_0 0x00000140 +# define NV50_PMC_INTR_EN_0_MASTER (1<<0) +# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0) +# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0) +# define NV50_PMC_ENABLE 0x00000200 +# define NV50_PMC_ENABLE_PFIFO (1<<8) +# define NV50_PMC_ENABLE_PGRAPH (1<<12) + +#define NV50_PCONNECTOR 0x0000e000 +#define NV50_PCONNECTOR__LEN 0x1 +#define NV50_PCONNECTOR__ESIZE 0x1000 +# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050 +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0) +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1) +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2) +# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18) +# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19) +# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054 +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18) +# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19) +# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104 +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2) +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6) +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10) +# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14) +# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138 +# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150 +# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168 +# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180 +# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240 +# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258 + +#define NV50_AUXCH_DATA_OUT(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) +#define NV50_AUXCH_DATA_OUT__SIZE 4 +#define NV50_AUXCH_DATA_IN(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) +#define NV50_AUXCH_DATA_IN__SIZE 4 +#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0) +#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4) +#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000 +#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000 +#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000 +#define NV50_AUXCH_CTRL_LINKEN 0x00100000 +#define NV50_AUXCH_CTRL_LINKEN_DISABLED 
0x00000000 +#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000 +#define NV50_AUXCH_CTRL_EXEC 0x00010000 +#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000 +#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000 +#define NV50_AUXCH_CTRL_CMD 0x0000f000 +#define NV50_AUXCH_CTRL_CMD_SHIFT 12 +#define NV50_AUXCH_CTRL_LEN 0x0000000f +#define NV50_AUXCH_CTRL_LEN_SHIFT 0 +#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8) +#define NV50_AUXCH_STAT_STATE 0x10000000 +#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000 +#define NV50_AUXCH_STAT_STATE_READY 0x10000000 +#define NV50_AUXCH_STAT_REPLY 0x000f0000 +#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000 +#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000 +#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000 +#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000 +#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000 +#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000 +#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000 +#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000 +#define NV50_AUXCH_STAT_COUNT 0x0000001f + +#define NV50_PBUS 0x00088000 +#define NV50_PBUS__LEN 0x1 +#define NV50_PBUS__ESIZE 0x1000 +# define NV50_PBUS_PCI_ID 0x00088000 +# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff +# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0 +# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000 +# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16 + +#define NV50_PFB 0x00100000 +#define NV50_PFB__LEN 0x1 +#define NV50_PFB__ESIZE 0x1000 + +#define NV50_PEXTDEV 0x00101000 +#define NV50_PEXTDEV__LEN 0x1 +#define NV50_PEXTDEV__ESIZE 0x1000 + +#define NV50_PROM 0x00300000 +#define NV50_PROM__LEN 0x1 +#define NV50_PROM__ESIZE 0x10000 + +#define NV50_PGRAPH 0x00400000 +#define NV50_PGRAPH__LEN 0x1 +#define NV50_PGRAPH__ESIZE 0x10000 + +#define NV50_PDISPLAY 0x00610000 +#define NV50_PDISPLAY_OBJECTS 0x00610010 +#define NV50_PDISPLAY_INTR_0 0x00610020 +#define NV50_PDISPLAY_INTR_1 0x00610024 +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2 +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004 +#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008 +#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 +#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 +#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 +#define NV50_PDISPLAY_INTR_EN_0 0x00610028 +#define NV50_PDISPLAY_INTR_EN_1 0x0061002c +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004 +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040 +#define NV50_PDISPLAY_UNK30_CTRL 0x00610030 +#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 +#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 +#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080) +#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084) +#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200) +#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010 +#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000 +#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010 +#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204) +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002 +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 
0x00000000 +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002 +#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001 +#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208) +#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c) + +#define NV50_PDISPLAY_CURSOR 0x00610270 +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001 +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 +#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 + +#define NV50_PDISPLAY_PIO_CTRL 0x00610300 +#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc +#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001 +#define NV50_PDISPLAY_PIO_DATA 0x00610304 + +#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) +#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) +#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18 +#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24 +#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48 +#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50 +#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58 +#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78 +#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8 +#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8 +#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0 +#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0 +#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8 +#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0 +#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8 +#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00 +#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08 +#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10 +#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18 +#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20 +#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000 +#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28 +#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38 +#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40 +#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48 +#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50 + +#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8) +#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) +#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8) +#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8) +#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i) (0x00610b80 + (i) * 0x8) +#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i) (0x00610b84 + (i) * 0x8) +#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8) +#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8) +#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8) +#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8) + +#define NV50_PDISPLAY_CRTC_CLK 0x00614000 +#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100) +#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600 +#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104) +#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108) +#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200) + +#define NV50_PDISPLAY_DAC_CLK 0x00614000 +#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280) + +#define NV50_PDISPLAY_SOR_CLK 0x00614000 +#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300) + +#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400) + +#define NV50_PDISPLAY_DAC 0x0061a000 
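+/* The DAC and SOR blocks below are register arrays: each output instance
+ * occupies a 0x800-byte window, so these macros take the output index (i)
+ * and add (i) * 0x800 to the block base.  For example,
+ * NV50_PDISPLAY_DAC_DPMS_CTRL(1) = 0x0061a004 + 1 * 0x800 = 0x0061a804,
+ * which would typically be accessed with nv_rd32()/nv_wr32() as elsewhere
+ * in the driver.
+ */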
+#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800) +#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040 +#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800) +#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000 +#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000 +#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000 +#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800) +#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600 + +#define NV50_PDISPLAY_SOR 0x0061c000 +#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800) +#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001 +#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800) +#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600 +#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800) +#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000 +#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000 +#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000 +#define NV50_PDISP_SOR_PWM_DIV(i) (0x0061c080 + (i) * 0x800) +#define NV50_PDISP_SOR_PWM_CTL(i) (0x0061c084 + (i) * 0x800) +#define NV50_PDISP_SOR_PWM_CTL_NEW 0x80000000 +#define NVA3_PDISP_SOR_PWM_CTL_UNK 0x40000000 +#define NV50_PDISP_SOR_PWM_CTL_VAL 0x000007ff +#define NVA3_PDISP_SOR_PWM_CTL_VAL 0x00ffffff +#define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_CTRL_ENABLED 0x00000001 +#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 +#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 +#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000 +#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000 +#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000 +#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000 +#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000 +#define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_SCFG(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) + +#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000) +#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000) +#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004) + +#define NV50_PDISPLAY_CURSOR_USER 0x00647000 +#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080) +#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c new file mode 100644 index 0000000..ca5492a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -0,0 +1,112 @@ +#include <linux/pagemap.h> +#include <linux/slab.h> + +#include <subdev/fb.h> + +#include "nouveau_drm.h" +#include "nouveau_ttm.h" + +struct nouveau_sgdma_be { + /* this has to be the first field so populate/unpopulated in + * nouve_bo.c works properly, otherwise have to move them here + */ + struct ttm_dma_tt ttm; + struct drm_device *dev; + struct nouveau_mem *node; +}; + +static void 
+nouveau_sgdma_destroy(struct ttm_tt *ttm) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; + + if (ttm) { + ttm_dma_tt_fini(&nvbe->ttm); + kfree(nvbe); + } +} + +static int +nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; + struct nouveau_mem *node = mem->mm_node; + u64 size = mem->num_pages << 12; + + if (ttm->sg) { + node->sg = ttm->sg; + nouveau_vm_map_sg_table(&node->vma[0], 0, size, node); + } else { + node->pages = nvbe->ttm.dma_address; + nouveau_vm_map_sg(&node->vma[0], 0, size, node); + } + + nvbe->node = node; + return 0; +} + +static int +nv04_sgdma_unbind(struct ttm_tt *ttm) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; + nouveau_vm_unmap(&nvbe->node->vma[0]); + return 0; +} + +static struct ttm_backend_func nv04_sgdma_backend = { + .bind = nv04_sgdma_bind, + .unbind = nv04_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +static int +nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; + struct nouveau_mem *node = mem->mm_node; + + /* noop: bound in move_notify() */ + if (ttm->sg) { + node->sg = ttm->sg; + } else + node->pages = nvbe->ttm.dma_address; + return 0; +} + +static int +nv50_sgdma_unbind(struct ttm_tt *ttm) +{ + /* noop: unbound in move_notify() */ + return 0; +} + +static struct ttm_backend_func nv50_sgdma_backend = { + .bind = nv50_sgdma_bind, + .unbind = nv50_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +struct ttm_tt * +nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page) +{ + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct nouveau_sgdma_be *nvbe; + + nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); + if (!nvbe) + return NULL; + + nvbe->dev = drm->dev; + if (nv_device(drm->device)->card_type < NV_50) + nvbe->ttm.ttm.func = &nv04_sgdma_backend; + else + nvbe->ttm.ttm.func = &nv50_sgdma_backend; + + if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { + kfree(nvbe); + return NULL; + } + return &nvbe->ttm.ttm; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c new file mode 100644 index 0000000..f19a15a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA, + * All Rights Reserved. + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA, + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/instmem.h> + +#include "nouveau_drm.h" +#include "nouveau_ttm.h" +#include "nouveau_gem.h" + +static int +nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) +{ + struct nouveau_drm *drm = nouveau_bdev(man->bdev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + man->priv = pfb; + return 0; +} + +static int +nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) +{ + man->priv = NULL; + return 0; +} + +static inline void +nouveau_mem_node_cleanup(struct nouveau_mem *node) +{ + if (node->vma[0].node) { + nouveau_vm_unmap(&node->vma[0]); + nouveau_vm_put(&node->vma[0]); + } + + if (node->vma[1].node) { + nouveau_vm_unmap(&node->vma[1]); + nouveau_vm_put(&node->vma[1]); + } +} + +static void +nouveau_vram_manager_del(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct nouveau_drm *drm = nouveau_bdev(man->bdev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + nouveau_mem_node_cleanup(mem->mm_node); + pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node); +} + +static int +nouveau_vram_manager_new(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct nouveau_drm *drm = nouveau_bdev(man->bdev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_mem *node; + u32 size_nc = 0; + int ret; + + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) + size_nc = 1 << nvbo->page_shift; + + ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT, + mem->page_alignment << PAGE_SHIFT, size_nc, + (nvbo->tile_flags >> 8) & 0x3ff, &node); + if (ret) { + mem->mm_node = NULL; + return (ret == -ENOSPC) ? 
0 : ret; + } + + node->page_shift = nvbo->page_shift; + + mem->mm_node = node; + mem->start = node->offset >> PAGE_SHIFT; + return 0; +} + +static void +nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) +{ + struct nouveau_fb *pfb = man->priv; + struct nouveau_mm *mm = &pfb->vram; + struct nouveau_mm_node *r; + u32 total = 0, free = 0; + + mutex_lock(&mm->mutex); + list_for_each_entry(r, &mm->nodes, nl_entry) { + printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n", + prefix, r->type, ((u64)r->offset << 12), + (((u64)r->offset + r->length) << 12)); + + total += r->length; + if (!r->type) + free += r->length; + } + mutex_unlock(&mm->mutex); + + printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n", + prefix, (u64)total << 12, (u64)free << 12); + printk(KERN_DEBUG "%s block: 0x%08x\n", + prefix, mm->block_size << 12); +} + +const struct ttm_mem_type_manager_func nouveau_vram_manager = { + nouveau_vram_manager_init, + nouveau_vram_manager_fini, + nouveau_vram_manager_new, + nouveau_vram_manager_del, + nouveau_vram_manager_debug +}; + +static int +nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) +{ + return 0; +} + +static int +nouveau_gart_manager_fini(struct ttm_mem_type_manager *man) +{ + return 0; +} + +static void +nouveau_gart_manager_del(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + nouveau_mem_node_cleanup(mem->mm_node); + kfree(mem->mm_node); + mem->mm_node = NULL; +} + +static int +nouveau_gart_manager_new(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_mem *node; + + if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024)) + return -ENOMEM; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->page_shift = 12; + + switch (nv_device(drm->device)->card_type) { + case NV_50: + if (nv_device(drm->device)->chipset != 0x50) + node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; + break; + case NV_C0: + case NV_D0: + case NV_E0: + node->memtype = (nvbo->tile_flags & 0xff00) >> 8; + break; + default: + break; + } + + mem->mm_node = node; + mem->start = 0; + return 0; +} + +static void +nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) +{ +} + +const struct ttm_mem_type_manager_func nouveau_gart_manager = { + nouveau_gart_manager_init, + nouveau_gart_manager_fini, + nouveau_gart_manager_new, + nouveau_gart_manager_del, + nouveau_gart_manager_debug +}; + +#include <core/subdev/vm/nv04.h> +static int +nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) +{ + struct nouveau_drm *drm = nouveau_bdev(man->bdev); + struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device); + struct nv04_vmmgr_priv *priv = (void *)vmm; + struct nouveau_vm *vm = NULL; + nouveau_vm_ref(priv->vm, &vm, NULL); + man->priv = vm; + return 0; +} + +static int +nv04_gart_manager_fini(struct ttm_mem_type_manager *man) +{ + struct nouveau_vm *vm = man->priv; + nouveau_vm_ref(NULL, &vm, NULL); + man->priv = NULL; + return 0; +} + +static void +nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) +{ + struct nouveau_mem *node = mem->mm_node; + if (node->vma[0].node) + nouveau_vm_put(&node->vma[0]); + kfree(mem->mm_node); + mem->mm_node = NULL; +} + +static int +nv04_gart_manager_new(struct ttm_mem_type_manager *man, + struct 
ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct nouveau_mem *node; + int ret; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->page_shift = 12; + + ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift, + NV_MEM_ACCESS_RW, &node->vma[0]); + if (ret) { + kfree(node); + return ret; + } + + mem->mm_node = node; + mem->start = node->vma[0].offset >> PAGE_SHIFT; + return 0; +} + +static void +nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) +{ +} + +const struct ttm_mem_type_manager_func nv04_gart_manager = { + nv04_gart_manager_init, + nv04_gart_manager_fini, + nv04_gart_manager_new, + nv04_gart_manager_del, + nv04_gart_manager_debug +}; + +int +nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv = filp->private_data; + struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) + return drm_mmap(filp, vma); + + return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); +} + +static int +nouveau_ttm_mem_global_init(struct drm_global_reference *ref) +{ + return ttm_mem_global_init(ref->object); +} + +static void +nouveau_ttm_mem_global_release(struct drm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +int +nouveau_ttm_global_init(struct nouveau_drm *drm) +{ + struct drm_global_reference *global_ref; + int ret; + + global_ref = &drm->ttm.mem_global_ref; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &nouveau_ttm_mem_global_init; + global_ref->release = &nouveau_ttm_mem_global_release; + + ret = drm_global_item_ref(global_ref); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed setting up TTM memory accounting\n"); + drm->ttm.mem_global_ref.release = NULL; + return ret; + } + + drm->ttm.bo_global_ref.mem_glob = global_ref->object; + global_ref = &drm->ttm.bo_global_ref.ref; + global_ref->global_type = DRM_GLOBAL_TTM_BO; + global_ref->size = sizeof(struct ttm_bo_global); + global_ref->init = &ttm_bo_global_init; + global_ref->release = &ttm_bo_global_release; + + ret = drm_global_item_ref(global_ref); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed setting up TTM BO subsystem\n"); + drm_global_item_unref(&drm->ttm.mem_global_ref); + drm->ttm.mem_global_ref.release = NULL; + return ret; + } + + return 0; +} + +void +nouveau_ttm_global_release(struct nouveau_drm *drm) +{ + if (drm->ttm.mem_global_ref.release == NULL) + return; + + drm_global_item_unref(&drm->ttm.bo_global_ref.ref); + drm_global_item_unref(&drm->ttm.mem_global_ref); + drm->ttm.mem_global_ref.release = NULL; +} + +int +nouveau_ttm_init(struct nouveau_drm *drm) +{ + struct drm_device *dev = drm->dev; + u32 bits; + int ret; + + bits = nouveau_vmmgr(drm->device)->dma_bits; + if ( drm->agp.stat == ENABLED || + !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) + bits = 32; + + ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits)); + if (ret) + pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32)); + + ret = nouveau_ttm_global_init(drm); + if (ret) + return ret; + + ret = ttm_bo_device_init(&drm->ttm.bdev, + drm->ttm.bo_global_ref.ref.object, + &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET, + bits <= 32 ? 
true : false); + if (ret) { + NV_ERROR(drm, "error initialising bo driver, %d\n", ret); + return ret; + } + + /* VRAM init */ + drm->gem.vram_available = nouveau_fb(drm->device)->ram.size; + drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved; + + ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, + drm->gem.vram_available >> PAGE_SHIFT); + if (ret) { + NV_ERROR(drm, "VRAM mm init failed, %d\n", ret); + return ret; + } + + drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), + DRM_MTRR_WC); + + /* GART init */ + if (drm->agp.stat != ENABLED) { + drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit; + if (drm->gem.gart_available > 512 * 1024 * 1024) + drm->gem.gart_available = 512 * 1024 * 1024; + } else { + drm->gem.gart_available = drm->agp.size; + } + + ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT, + drm->gem.gart_available >> PAGE_SHIFT); + if (ret) { + NV_ERROR(drm, "GART mm init failed, %d\n", ret); + return ret; + } + + NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20)); + NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20)); + return 0; +} + +void +nouveau_ttm_fini(struct nouveau_drm *drm) +{ + mutex_lock(&drm->dev->struct_mutex); + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); + mutex_unlock(&drm->dev->struct_mutex); + + ttm_bo_device_release(&drm->ttm.bdev); + + nouveau_ttm_global_release(drm); + + if (drm->ttm.mtrr >= 0) { + drm_mtrr_del(drm->ttm.mtrr, + pci_resource_start(drm->dev->pdev, 1), + pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC); + drm->ttm.mtrr = -1; + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h new file mode 100644 index 0000000..25b0de4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -0,0 +1,25 @@ +#ifndef __NOUVEAU_TTM_H__ +#define __NOUVEAU_TTM_H__ + +static inline struct nouveau_drm * +nouveau_bdev(struct ttm_bo_device *bd) +{ + return container_of(bd, struct nouveau_drm, ttm.bdev); +} + +extern const struct ttm_mem_type_manager_func nouveau_vram_manager; +extern const struct ttm_mem_type_manager_func nouveau_gart_manager; +extern const struct ttm_mem_type_manager_func nv04_gart_manager; + +struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *, + unsigned long size, u32 page_flags, + struct page *dummy_read_page); + +int nouveau_ttm_init(struct nouveau_drm *drm); +void nouveau_ttm_fini(struct nouveau_drm *drm); +int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); + +int nouveau_ttm_global_init(struct nouveau_drm *); +void nouveau_ttm_global_release(struct nouveau_drm *); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c new file mode 100644 index 0000000..25d3495 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -0,0 +1,98 @@ +#include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_acpi.h" +#include "nouveau_fbcon.h" +#include "nouveau_vga.h" + +static unsigned int +nouveau_vga_set_decode(void *priv, bool state) +{ + struct nouveau_device *device = nouveau_dev(priv); + + if (device->chipset >= 0x40) + nv_wr32(device, 0x088054, state); + else + nv_wr32(device, 0x001854, state); + + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +static void 
+nouveau_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + if (state == VGA_SWITCHEROO_ON) { + printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + nouveau_pmops_resume(&pdev->dev); + drm_kms_helper_poll_enable(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; + } else { + printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + drm_kms_helper_poll_disable(dev); + nouveau_switcheroo_optimus_dsm(); + nouveau_pmops_suspend(&pdev->dev); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +static void +nouveau_switcheroo_reprobe(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + nouveau_fbcon_output_poll_changed(dev); +} + +static bool +nouveau_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + bool can_switch; + + spin_lock(&dev->count_lock); + can_switch = (dev->open_count == 0); + spin_unlock(&dev->count_lock); + return can_switch; +} + +static const struct vga_switcheroo_client_ops +nouveau_switcheroo_ops = { + .set_gpu_state = nouveau_switcheroo_set_state, + .reprobe = nouveau_switcheroo_reprobe, + .can_switch = nouveau_switcheroo_can_switch, +}; + +void +nouveau_vga_init(struct nouveau_drm *drm) +{ + struct drm_device *dev = drm->dev; + vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); + vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops); +} + +void +nouveau_vga_fini(struct nouveau_drm *drm) +{ + struct drm_device *dev = drm->dev; + vga_switcheroo_unregister_client(dev->pdev); + vga_client_register(dev->pdev, NULL, NULL, NULL); +} + + +void +nouveau_vga_lastclose(struct drm_device *dev) +{ + vga_switcheroo_process_delayed_switch(); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h new file mode 100644 index 0000000..ea3ad69 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vga.h @@ -0,0 +1,8 @@ +#ifndef __NOUVEAU_VGA_H__ +#define __NOUVEAU_VGA_H__ + +void nouveau_vga_init(struct nouveau_drm *); +void nouveau_vga_fini(struct nouveau_drm *); +void nouveau_vga_lastclose(struct drm_device *dev); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c new file mode 100644 index 0000000..9976414 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_volt.c @@ -0,0 +1,250 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> + +#include "nouveau_drm.h" +#include "nouveau_pm.h" + +#include <subdev/bios/gpio.h> +#include <subdev/gpio.h> + +static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 }; +static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); + +int +nouveau_voltage_gpio_get(struct drm_device *dev) +{ + struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage; + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_gpio *gpio = nouveau_gpio(device); + u8 vid = 0; + int i; + + for (i = 0; i < nr_vidtag; i++) { + if (!(volt->vid_mask & (1 << i))) + continue; + + vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i; + } + + return nouveau_volt_lvl_lookup(dev, vid); +} + +int +nouveau_voltage_gpio_set(struct drm_device *dev, int voltage) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_gpio *gpio = nouveau_gpio(device); + struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage; + int vid, i; + + vid = nouveau_volt_vid_lookup(dev, voltage); + if (vid < 0) + return vid; + + for (i = 0; i < nr_vidtag; i++) { + if (!(volt->vid_mask & (1 << i))) + continue; + + gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i))); + } + + return 0; +} + +int +nouveau_volt_vid_lookup(struct drm_device *dev, int voltage) +{ + struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage; + int i; + + for (i = 0; i < volt->nr_level; i++) { + if (volt->level[i].voltage == voltage) + return volt->level[i].vid; + } + + return -ENOENT; +} + +int +nouveau_volt_lvl_lookup(struct drm_device *dev, int vid) +{ + struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage; + int i; + + for (i = 0; i < volt->nr_level; i++) { + if (volt->level[i].vid == vid) + return volt->level[i].voltage; + } + + return -ENOENT; +} + +void +nouveau_volt_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nouveau_pm *pm = nouveau_pm(dev); + struct nouveau_pm_voltage *voltage = &pm->voltage; + struct nvbios *bios = &drm->vbios; + struct dcb_gpio_func func; + struct bit_entry P; + u8 *volt = NULL, *entry; + int i, headerlen, recordlen, entries, vidmask, vidshift; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + volt = ROMPTR(dev, P.data[16]); + else + if (P.version == 2) + volt = ROMPTR(dev, P.data[12]); + else { + NV_WARN(drm, "unknown volt for BIT P %d\n", P.version); + } + } else { + if (bios->data[bios->offset + 6] < 0x27) { + NV_DEBUG(drm, "BMP version too old for voltage\n"); + return; + } + + volt = ROMPTR(dev, bios->data[bios->offset + 0x98]); + } + + if (!volt) { + NV_DEBUG(drm, "voltage table pointer invalid\n"); + return; + } + + switch (volt[0]) { + case 0x10: + case 0x11: + case 0x12: + headerlen = 5; + recordlen = volt[1]; + entries = volt[2]; + vidshift = 0; + vidmask = volt[4]; + break; + case 0x20: + headerlen = volt[1]; + recordlen = volt[3]; + entries = volt[2]; + vidshift = 0; /* could be vidshift like 0x30? 
*/ + vidmask = volt[5]; + break; + case 0x30: + headerlen = volt[1]; + recordlen = volt[2]; + entries = volt[3]; + vidmask = volt[4]; + /* no longer certain what volt[5] is, if it's related to + * the vid shift then it's definitely not a function of + * how many bits are set. + * + * after looking at a number of nva3+ vbios images, they + * all seem likely to have a static shift of 2.. lets + * go with that for now until proven otherwise. + */ + vidshift = 2; + break; + case 0x40: + headerlen = volt[1]; + recordlen = volt[2]; + entries = volt[3]; /* not a clue what the entries are for.. */ + vidmask = volt[11]; /* guess.. */ + vidshift = 0; + break; + default: + NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]); + return; + } + + /* validate vid mask */ + voltage->vid_mask = vidmask; + if (!voltage->vid_mask) + return; + + i = 0; + while (vidmask) { + if (i > nr_vidtag) { + NV_DEBUG(drm, "vid bit %d unknown\n", i); + return; + } + + if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) { + NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i); + return; + } + + vidmask >>= 1; + i++; + } + + /* parse vbios entries into common format */ + voltage->version = volt[0]; + if (voltage->version < 0x40) { + voltage->nr_level = entries; + voltage->level = + kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); + if (!voltage->level) + return; + + entry = volt + headerlen; + for (i = 0; i < entries; i++, entry += recordlen) { + voltage->level[i].voltage = entry[0] * 10000; + voltage->level[i].vid = entry[1] >> vidshift; + } + } else { + u32 volt_uv = ROM32(volt[4]); + s16 step_uv = ROM16(volt[8]); + u8 vid; + + voltage->nr_level = voltage->vid_mask + 1; + voltage->level = kcalloc(voltage->nr_level, + sizeof(*voltage->level), GFP_KERNEL); + if (!voltage->level) + return; + + for (vid = 0; vid <= voltage->vid_mask; vid++) { + voltage->level[vid].voltage = volt_uv; + voltage->level[vid].vid = vid; + volt_uv += step_uv; + } + } + + voltage->supported = true; +} + +void +nouveau_volt_fini(struct drm_device *dev) +{ + struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage; + + kfree(volt->level); +} diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c new file mode 100644 index 0000000..77dcc9c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -0,0 +1,277 @@ +/* + * Copyright 2009 Ben Skeggs + * Copyright 2008 Stuart Bennett + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <core/object.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fbcon.h" + +int +nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + ret = RING_SPACE(chan, 4); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3); + OUT_RING(chan, (region->sy << 16) | region->sx); + OUT_RING(chan, (region->dy << 16) | region->dx); + OUT_RING(chan, (region->height << 16) | region->width); + FIRE_RING(chan); + return 0; +} + +int +nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + ret = RING_SPACE(chan, 7); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1); + OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); + BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1); + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) + OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); + else + OUT_RING(chan, rect->color); + BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2); + OUT_RING(chan, (rect->dx << 16) | rect->dy); + OUT_RING(chan, (rect->width << 16) | rect->height); + FIRE_RING(chan); + return 0; +} + +int +nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + uint32_t fg; + uint32_t bg; + uint32_t dsize; + uint32_t width; + uint32_t *data = (uint32_t *)image->data; + int ret; + + if (image->depth != 1) + return -ENODEV; + + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + width = ALIGN(image->width, 8); + dsize = ALIGN(width * image->height, 32) >> 5; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; + bg = ((uint32_t *) info->pseudo_palette)[image->bg_color]; + } else { + fg = image->fg_color; + bg = image->bg_color; + } + + BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7); + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + OUT_RING(chan, ((image->dy + image->height) << 16) | + ((image->dx + image->width) & 0xffff)); + OUT_RING(chan, bg); + OUT_RING(chan, fg); + OUT_RING(chan, (image->height << 16) | width); + OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + + while (dsize) { + int iter_len = dsize > 128 ? 
128 : dsize; + + ret = RING_SPACE(chan, iter_len + 1); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len); + OUT_RINGp(chan, data, iter_len); + data += iter_len; + dsize -= iter_len; + } + + FIRE_RING(chan); + return 0; +} + +int +nv04_fbcon_accel_init(struct fb_info *info) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *chan = drm->channel; + struct nouveau_device *device = nv_device(drm->device); + struct nouveau_object *object; + int surface_fmt, pattern_fmt, rect_fmt; + int ret; + + switch (info->var.bits_per_pixel) { + case 8: + surface_fmt = 1; + pattern_fmt = 3; + rect_fmt = 3; + break; + case 16: + surface_fmt = 4; + pattern_fmt = 1; + rect_fmt = 1; + break; + case 32: + switch (info->var.transp.length) { + case 0: /* depth 24 */ + case 8: /* depth 32 */ + break; + default: + return -EINVAL; + } + + surface_fmt = 6; + pattern_fmt = 3; + rect_fmt = 3; + break; + default: + return -EINVAL; + } + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D, + device->card_type >= NV_10 ? 0x0062 : 0x0042, + NULL, 0, &object); + if (ret) + return ret; + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect, + 0x0019, NULL, 0, &object); + if (ret) + return ret; + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop, + 0x0043, NULL, 0, &object); + if (ret) + return ret; + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt, + 0x0044, NULL, 0, &object); + if (ret) + return ret; + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect, + 0x004a, NULL, 0, &object); + if (ret) + return ret; + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit, + device->chipset >= 0x11 ? 
0x009f : 0x005f, + NULL, 0, &object); + if (ret) + return ret; + + if (RING_SPACE(chan, 49)) { + nouveau_fbcon_gpu_lockup(info); + return 0; + } + + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); + OUT_RING(chan, NvCtxSurf2D); + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2); + OUT_RING(chan, NvDmaFB); + OUT_RING(chan, NvDmaFB); + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4); + OUT_RING(chan, surface_fmt); + OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); + OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); + OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); + + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); + OUT_RING(chan, NvRop); + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1); + OUT_RING(chan, 0x55); + + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); + OUT_RING(chan, NvImagePatt); + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8); + OUT_RING(chan, pattern_fmt); +#ifdef __BIG_ENDIAN + OUT_RING(chan, 2); +#else + OUT_RING(chan, 1); +#endif + OUT_RING(chan, 0); + OUT_RING(chan, 1); + OUT_RING(chan, ~0); + OUT_RING(chan, ~0); + OUT_RING(chan, ~0); + OUT_RING(chan, ~0); + + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); + OUT_RING(chan, NvClipRect); + BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2); + OUT_RING(chan, 0); + OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); + + BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1); + OUT_RING(chan, NvImageBlit); + BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1); + OUT_RING(chan, NvCtxSurf2D); + BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1); + OUT_RING(chan, 3); + + BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1); + OUT_RING(chan, NvGdiRect); + BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1); + OUT_RING(chan, NvCtxSurf2D); + BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2); + OUT_RING(chan, NvImagePatt); + OUT_RING(chan, NvRop); + BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1); + OUT_RING(chan, rect_fmt); + BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1); + OUT_RING(chan, 3); + + FIRE_RING(chan); + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c new file mode 100644 index 0000000..94eadd1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_fence.c @@ -0,0 +1,111 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <engine/fifo.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" + +struct nv04_fence_chan { + struct nouveau_fence_chan base; +}; + +struct nv04_fence_priv { + struct nouveau_fence_priv base; +}; + +static int +nv04_fence_emit(struct nouveau_fence *fence) +{ + struct nouveau_channel *chan = fence->channel; + int ret = RING_SPACE(chan, 2); + if (ret == 0) { + BEGIN_NV04(chan, NvSubSw, 0x0150, 1); + OUT_RING (chan, fence->sequence); + FIRE_RING (chan); + } + return ret; +} + +static int +nv04_fence_sync(struct nouveau_fence *fence, + struct nouveau_channel *prev, struct nouveau_channel *chan) +{ + return -ENODEV; +} + +static u32 +nv04_fence_read(struct nouveau_channel *chan) +{ + struct nouveau_fifo_chan *fifo = (void *)chan->object; + return atomic_read(&fifo->refcnt); +} + +static void +nv04_fence_context_del(struct nouveau_channel *chan) +{ + struct nv04_fence_chan *fctx = chan->fence; + nouveau_fence_context_del(&fctx->base); + chan->fence = NULL; + kfree(fctx); +} + +static int +nv04_fence_context_new(struct nouveau_channel *chan) +{ + struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (fctx) { + nouveau_fence_context_new(&fctx->base); + fctx->base.emit = nv04_fence_emit; + fctx->base.sync = nv04_fence_sync; + fctx->base.read = nv04_fence_read; + chan->fence = fctx; + return 0; + } + return -ENOMEM; +} + +static void +nv04_fence_destroy(struct nouveau_drm *drm) +{ + struct nv04_fence_priv *priv = drm->fence; + drm->fence = NULL; + kfree(priv); +} + +int +nv04_fence_create(struct nouveau_drm *drm) +{ + struct nv04_fence_priv *priv; + + priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base.dtor = nv04_fence_destroy; + priv->base.context_new = nv04_fence_context_new; + priv->base.context_del = nv04_fence_context_del; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c new file mode 100644 index 0000000..27afc0e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -0,0 +1,146 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> +#include "nouveau_drm.h" +#include "nouveau_reg.h" +#include "dispnv04/hw.h" +#include "nouveau_pm.h" + +#include <subdev/bios/pll.h> +#include <subdev/clock.h> +#include <subdev/timer.h> + +int +nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + int ret; + + ret = nouveau_hw_get_clock(dev, PLL_CORE); + if (ret < 0) + return ret; + perflvl->core = ret; + + ret = nouveau_hw_get_clock(dev, PLL_MEMORY); + if (ret < 0) + return ret; + perflvl->memory = ret; + + return 0; +} + +struct nv04_pm_clock { + struct nvbios_pll pll; + struct nouveau_pll_vals calc; +}; + +struct nv04_pm_state { + struct nv04_pm_clock core; + struct nv04_pm_clock memory; +}; + +static int +calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct nouveau_clock *pclk = nouveau_clock(device); + int ret; + + ret = nvbios_pll_parse(bios, id, &clk->pll); + if (ret) + return ret; + + ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc); + if (!ret) + return -EINVAL; + + return 0; +} + +void * +nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nv04_pm_state *info; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core); + if (ret) + goto error; + + if (perflvl->memory) { + ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory); + if (ret) + goto error; + } + + return info; +error: + kfree(info); + return ERR_PTR(ret); +} + +static void +prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_clock *pclk = nouveau_clock(device); + u32 reg = clk->pll.reg; + + /* thank the insane nouveau_hw_setpll() interface for this */ + if (device->card_type >= NV_40) + reg += 4; + + pclk->pll_prog(pclk, reg, &clk->calc); +} + +int +nv04_pm_clocks_set(struct drm_device *dev, void *pre_state) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_timer *ptimer = nouveau_timer(device); + struct nv04_pm_state *state = pre_state; + + prog_pll(dev, &state->core); + + if (state->memory.pll.reg) { + prog_pll(dev, &state->memory); + if (device->card_type < NV_30) { + if (device->card_type == NV_20) + nv_mask(device, 0x1002c4, 0, 1 << 20); + + /* Reset the DLLs */ + nv_mask(device, 0x1002c0, 0, 1 << 8); + } + } + + nv_ofuncs(ptimer)->init(nv_object(ptimer)); + + kfree(state); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c new file mode 100644 index 0000000..06f434f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_fence.c @@ -0,0 +1,110 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <core/object.h> +#include <core/class.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nv10_fence.h" + +int +nv10_fence_emit(struct nouveau_fence *fence) +{ + struct nouveau_channel *chan = fence->channel; + int ret = RING_SPACE(chan, 2); + if (ret == 0) { + BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1); + OUT_RING (chan, fence->sequence); + FIRE_RING (chan); + } + return ret; +} + + +static int +nv10_fence_sync(struct nouveau_fence *fence, + struct nouveau_channel *prev, struct nouveau_channel *chan) +{ + return -ENODEV; +} + +u32 +nv10_fence_read(struct nouveau_channel *chan) +{ + return nv_ro32(chan->object, 0x0048); +} + +void +nv10_fence_context_del(struct nouveau_channel *chan) +{ + struct nv10_fence_chan *fctx = chan->fence; + nouveau_fence_context_del(&fctx->base); + chan->fence = NULL; + kfree(fctx); +} + +int +nv10_fence_context_new(struct nouveau_channel *chan) +{ + struct nv10_fence_chan *fctx; + + fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return -ENOMEM; + + nouveau_fence_context_new(&fctx->base); + fctx->base.emit = nv10_fence_emit; + fctx->base.read = nv10_fence_read; + fctx->base.sync = nv10_fence_sync; + return 0; +} + +void +nv10_fence_destroy(struct nouveau_drm *drm) +{ + struct nv10_fence_priv *priv = drm->fence; + nouveau_bo_unmap(priv->bo); + if (priv->bo) + nouveau_bo_unpin(priv->bo); + nouveau_bo_ref(NULL, &priv->bo); + drm->fence = NULL; + kfree(priv); +} + +int +nv10_fence_create(struct nouveau_drm *drm) +{ + struct nv10_fence_priv *priv; + + priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base.dtor = nv10_fence_destroy; + priv->base.context_new = nv10_fence_context_new; + priv->base.context_del = nv10_fence_context_del; + spin_lock_init(&priv->lock); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h new file mode 100644 index 0000000..e5d9204 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv10_fence.h @@ -0,0 +1,19 @@ +#ifndef __NV10_FENCE_H_ +#define __NV10_FENCE_H_ + +#include <core/os.h> +#include "nouveau_fence.h" +#include "nouveau_bo.h" + +struct nv10_fence_chan { + struct nouveau_fence_chan base; +}; + +struct nv10_fence_priv { + struct nouveau_fence_priv base; + struct nouveau_bo *bo; + spinlock_t lock; + u32 sequence; +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c new file mode 100644 index 0000000..22aa996 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -0,0 +1,149 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <core/object.h> +#include <core/class.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nv10_fence.h" + +int +nv17_fence_sync(struct nouveau_fence *fence, + struct nouveau_channel *prev, struct nouveau_channel *chan) +{ + struct nv10_fence_priv *priv = chan->drm->fence; + u32 value; + int ret; + + if (!mutex_trylock(&prev->cli->mutex)) + return -EBUSY; + + spin_lock(&priv->lock); + value = priv->sequence; + priv->sequence += 2; + spin_unlock(&priv->lock); + + ret = RING_SPACE(prev, 5); + if (!ret) { + BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); + OUT_RING (prev, NvSema); + OUT_RING (prev, 0); + OUT_RING (prev, value + 0); + OUT_RING (prev, value + 1); + FIRE_RING (prev); + } + + if (!ret && !(ret = RING_SPACE(chan, 5))) { + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); + OUT_RING (chan, NvSema); + OUT_RING (chan, 0); + OUT_RING (chan, value + 1); + OUT_RING (chan, value + 2); + FIRE_RING (chan); + } + + mutex_unlock(&prev->cli->mutex); + return 0; +} + +static int +nv17_fence_context_new(struct nouveau_channel *chan) +{ + struct nv10_fence_priv *priv = chan->drm->fence; + struct nv10_fence_chan *fctx; + struct ttm_mem_reg *mem = &priv->bo->bo.mem; + struct nouveau_object *object; + u32 start = mem->start * PAGE_SIZE; + u32 limit = start + mem->size - 1; + int ret = 0; + + fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return -ENOMEM; + + nouveau_fence_context_new(&fctx->base); + fctx->base.emit = nv10_fence_emit; + fctx->base.read = nv10_fence_read; + fctx->base.sync = nv17_fence_sync; + + ret = nouveau_object_new(nv_object(chan->cli), chan->handle, + NvSema, 0x0002, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = start, + .limit = limit, + }, sizeof(struct nv_dma_class), + &object); + if (ret) + nv10_fence_context_del(chan); + return ret; +} + +void +nv17_fence_resume(struct nouveau_drm *drm) +{ + struct nv10_fence_priv *priv = drm->fence; + + nouveau_bo_wr32(priv->bo, 0, priv->sequence); +} + +int +nv17_fence_create(struct nouveau_drm *drm) +{ + struct nv10_fence_priv *priv; + int ret = 0; + + priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base.dtor = nv10_fence_destroy; + priv->base.resume = nv17_fence_resume; + priv->base.context_new = nv17_fence_context_new; + priv->base.context_del = 
nv10_fence_context_del; + spin_lock_init(&priv->lock); + + ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &priv->bo); + if (!ret) { + ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); + if (!ret) { + ret = nouveau_bo_map(priv->bo); + if (ret) + nouveau_bo_unpin(priv->bo); + } + if (ret) + nouveau_bo_ref(NULL, &priv->bo); + } + + if (ret) { + nv10_fence_destroy(drm); + return ret; + } + + nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c new file mode 100644 index 0000000..3af5bcd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_pm.c @@ -0,0 +1,353 @@ +/* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> +#include "nouveau_drm.h" +#include "nouveau_bios.h" +#include "nouveau_pm.h" +#include "dispnv04/hw.h" + +#include <subdev/bios/pll.h> +#include <subdev/clock.h> +#include <subdev/timer.h> + +#include <engine/fifo.h> + +#define min2(a,b) ((a) < (b) ? 
(a) : (b)) + +static u32 +read_pll_1(struct drm_device *dev, u32 reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ctrl = nv_rd32(device, reg + 0x00); + int P = (ctrl & 0x00070000) >> 16; + int N = (ctrl & 0x0000ff00) >> 8; + int M = (ctrl & 0x000000ff) >> 0; + u32 ref = 27000, clk = 0; + + if (ctrl & 0x80000000) + clk = ref * N / M; + + return clk >> P; +} + +static u32 +read_pll_2(struct drm_device *dev, u32 reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ctrl = nv_rd32(device, reg + 0x00); + u32 coef = nv_rd32(device, reg + 0x04); + int N2 = (coef & 0xff000000) >> 24; + int M2 = (coef & 0x00ff0000) >> 16; + int N1 = (coef & 0x0000ff00) >> 8; + int M1 = (coef & 0x000000ff) >> 0; + int P = (ctrl & 0x00070000) >> 16; + u32 ref = 27000, clk = 0; + + if ((ctrl & 0x80000000) && M1) { + clk = ref * N1 / M1; + if ((ctrl & 0x40000100) == 0x40000000) { + if (M2) + clk = clk * N2 / M2; + else + clk = 0; + } + } + + return clk >> P; +} + +static u32 +read_clk(struct drm_device *dev, u32 src) +{ + switch (src) { + case 3: + return read_pll_2(dev, 0x004000); + case 2: + return read_pll_1(dev, 0x004008); + default: + break; + } + + return 0; +} + +int +nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ctrl = nv_rd32(device, 0x00c040); + + perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0); + perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4); + perflvl->memory = read_pll_2(dev, 0x4020); + return 0; +} + +struct nv40_pm_state { + u32 ctrl; + u32 npll_ctrl; + u32 npll_coef; + u32 spll; + u32 mpll_ctrl; + u32 mpll_coef; +}; + +static int +nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, + u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct nouveau_clock *pclk = nouveau_clock(device); + struct nouveau_pll_vals coef; + int ret; + + ret = nvbios_pll_parse(bios, reg, pll); + if (ret) + return ret; + + if (clk < pll->vco1.max_freq) + pll->vco2.max_freq = 0; + + ret = pclk->pll_calc(pclk, pll, clk, &coef); + if (ret == 0) + return -ERANGE; + + *N1 = coef.N1; + *M1 = coef.M1; + if (N2 && M2) { + if (pll->vco2.max_freq) { + *N2 = coef.N2; + *M2 = coef.M2; + } else { + *N2 = 1; + *M2 = 1; + } + } + *log2P = coef.log2P; + return 0; +} + +void * +nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nv40_pm_state *info; + struct nvbios_pll pll; + int N1, N2, M1, M2, log2P; + int ret; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + /* core/geometric clock */ + ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core, + &N1, &M1, &N2, &M2, &log2P); + if (ret < 0) + goto out; + + if (N2 == M2) { + info->npll_ctrl = 0x80000100 | (log2P << 16); + info->npll_coef = (N1 << 8) | M1; + } else { + info->npll_ctrl = 0xc0000000 | (log2P << 16); + info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; + } + + /* use the second PLL for shader/rop clock, if it differs from core */ + if (perflvl->shader && perflvl->shader != perflvl->core) { + ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader, + &N1, &M1, NULL, NULL, &log2P); + if (ret < 0) + goto out; + + info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1; + info->ctrl = 0x00000223; + } else { + info->spll = 0x00000000; + info->ctrl = 0x00000333; + } + + /* memory clock */ + if (!perflvl->memory) { + info->mpll_ctrl = 
0x00000000; + goto out; + } + + ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, + &N1, &M1, &N2, &M2, &log2P); + if (ret < 0) + goto out; + + info->mpll_ctrl = 0x80000000 | (log2P << 16); + info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20; + if (N2 == M2) { + info->mpll_ctrl |= 0x00000100; + info->mpll_coef = (N1 << 8) | M1; + } else { + info->mpll_ctrl |= 0x40000000; + info->mpll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; + } + +out: + if (ret < 0) { + kfree(info); + info = ERR_PTR(ret); + } + return info; +} + +static bool +nv40_pm_gr_idle(void *data) +{ + struct drm_device *dev = data; + struct nouveau_device *device = nouveau_dev(dev); + + if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 != + (nv_rd32(device, 0x400760) & 0x0000000f)) + return false; + + if (nv_rd32(device, 0x400700)) + return false; + + return true; +} + +int +nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_fifo *pfifo = nouveau_fifo(device); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv40_pm_state *info = pre_state; + unsigned long flags; + struct bit_entry M; + u32 crtc_mask = 0; + u8 sr1[2]; + int i, ret = -EAGAIN; + + /* determine which CRTCs are active, fetch VGA_SR1 for each */ + for (i = 0; i < 2; i++) { + u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000)); + u32 cnt = 0; + do { + if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) { + nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01); + sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000)); + if (!(sr1[i] & 0x20)) + crtc_mask |= (1 << i); + break; + } + udelay(1); + } while (cnt++ < 32); + } + + /* halt and idle engines */ + pfifo->pause(pfifo, &flags); + + if (!nv_wait_cb(device, nv40_pm_gr_idle, dev)) + goto resume; + + ret = 0; + + /* set engine clocks */ + nv_mask(device, 0x00c040, 0x00000333, 0x00000000); + nv_wr32(device, 0x004004, info->npll_coef); + nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl); + nv_mask(device, 0x004008, 0xc007ffff, info->spll); + mdelay(5); + nv_mask(device, 0x00c040, 0x00000333, info->ctrl); + + if (!info->mpll_ctrl) + goto resume; + + /* wait for vblank start on active crtcs, disable memory access */ + for (i = 0; i < 2; i++) { + if (!(crtc_mask & (1 << i))) + continue; + nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000); + nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); + nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01); + nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20); + } + + /* prepare ram for reclocking */ + nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */ + nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */ + nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */ + nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */ + nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */ + + /* change the PLL of each memory partition */ + nv_mask(device, 0x00c040, 0x0000c000, 0x00000000); + switch (nv_device(drm->device)->chipset) { + case 0x40: + case 0x45: + case 0x41: + case 0x42: + case 0x47: + nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl); + nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl); + nv_wr32(device, 0x004048, info->mpll_coef); + nv_wr32(device, 0x004030, info->mpll_coef); + case 0x43: + case 0x49: + case 0x4b: + nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl); + nv_wr32(device, 0x00403c, info->mpll_coef); + default: + nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl); + nv_wr32(device, 0x004024, 
info->mpll_coef); + break; + } + udelay(100); + nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000); + + /* re-enable normal operation of memory controller */ + nv_wr32(device, 0x1002dc, 0x00000000); + nv_mask(device, 0x100210, 0x80000000, 0x80000000); + udelay(100); + + /* execute memory reset script from vbios */ + if (!bit_table(dev, 'M', &M)) + nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0); + + /* make sure we're in vblank (hopefully the same one as before), and + * then re-enable crtc memory access + */ + for (i = 0; i < 2; i++) { + if (!(crtc_mask & (1 << i))) + continue; + nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); + nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01); + nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]); + } + + /* resume engines */ +resume: + pfifo->start(pfifo, &flags); + kfree(info); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c new file mode 100644 index 0000000..dd5e01f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -0,0 +1,2300 @@ + /* + * Copyright 2011 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include <linux/dma-mapping.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_gem.h" +#include "nouveau_connector.h" +#include "nouveau_encoder.h" +#include "nouveau_crtc.h" +#include "nouveau_fence.h" +#include "nv50_display.h" + +#include <core/client.h> +#include <core/gpuobj.h> +#include <core/class.h> + +#include <subdev/timer.h> +#include <subdev/bar.h> +#include <subdev/fb.h> +#include <subdev/i2c.h> + +#define EVO_DMA_NR 9 + +#define EVO_MASTER (0x00) +#define EVO_FLIP(c) (0x01 + (c)) +#define EVO_OVLY(c) (0x05 + (c)) +#define EVO_OIMM(c) (0x09 + (c)) +#define EVO_CURS(c) (0x0d + (c)) + +/* offsets in shared sync bo of various structures */ +#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) +#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) +#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) +#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) + +#define EVO_CORE_HANDLE (0xd1500000) +#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) +#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff)) +#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \ + (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8)) + +/****************************************************************************** + * EVO channel + *****************************************************************************/ + +struct nv50_chan { + struct nouveau_object *user; + u32 handle; +}; + +static int +nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head, + void *data, u32 size, struct nv50_chan *chan) +{ + struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); + const u32 oclass = EVO_CHAN_OCLASS(bclass, core); + const u32 handle = EVO_CHAN_HANDLE(bclass, head); + int ret; + + ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle, + oclass, data, size, &chan->user); + if (ret) + return ret; + + chan->handle = handle; + return 0; +} + +static void +nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan) +{ + struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); + if (chan->handle) + nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle); +} + +/****************************************************************************** + * PIO EVO channel + *****************************************************************************/ + +struct nv50_pioc { + struct nv50_chan base; +}; + +static void +nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc) +{ + nv50_chan_destroy(core, &pioc->base); +} + +static int +nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head, + void *data, u32 size, struct nv50_pioc *pioc) +{ + return nv50_chan_create(core, bclass, head, data, size, &pioc->base); +} + +/****************************************************************************** + * DMA EVO channel + *****************************************************************************/ + +struct nv50_dmac { + struct nv50_chan base; + dma_addr_t handle; + u32 *ptr; + + /* Protects against concurrent pushbuf access to this channel, lock is + * grabbed by evo_wait (if the pushbuf reservation is successful) and + * dropped again by evo_kick. 
*/ + struct mutex lock; +}; + +static void +nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac) +{ + if (dmac->ptr) { + struct pci_dev *pdev = nv_device(core)->pdev; + pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); + } + + nv50_chan_destroy(core, &dmac->base); +} + +static int +nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent) +{ + struct nouveau_fb *pfb = nouveau_fb(core); + struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); + struct nouveau_object *object; + int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NV50_DMA_CONF0_ENABLE | + NV50_DMA_CONF0_PART_256, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nouveau_object_new(client, parent, NvEvoFB16, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 | + NV50_DMA_CONF0_PART_256, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nouveau_object_new(client, parent, NvEvoFB32, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a | + NV50_DMA_CONF0_PART_256, + }, sizeof(struct nv_dma_class), &object); + return ret; +} + +static int +nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) +{ + struct nouveau_fb *pfb = nouveau_fb(core); + struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); + struct nouveau_object *object; + int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NVC0_DMA_CONF0_ENABLE, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nouveau_object_new(client, parent, NvEvoFB16, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nouveau_object_new(client, parent, NvEvoFB32, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, + }, sizeof(struct nv_dma_class), &object); + return ret; +} + +static int +nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) +{ + struct nouveau_fb *pfb = nouveau_fb(core); + struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); + struct nouveau_object *object; + int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NVD0_DMA_CONF0_ENABLE | + NVD0_DMA_CONF0_PAGE_LP, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nouveau_object_new(client, parent, NvEvoFB32, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe | + 
NVD0_DMA_CONF0_PAGE_LP, + }, sizeof(struct nv_dma_class), &object); + return ret; +} + +static int +nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head, + void *data, u32 size, u64 syncbuf, + struct nv50_dmac *dmac) +{ + struct nouveau_fb *pfb = nouveau_fb(core); + struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); + struct nouveau_object *object; + u32 pushbuf = *(u32 *)data; + int ret; + + mutex_init(&dmac->lock); + + dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, + &dmac->handle); + if (!dmac->ptr) + return -ENOMEM; + + ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf, + NV_DMA_FROM_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_PCI_US | + NV_DMA_ACCESS_RD, + .start = dmac->handle + 0x0000, + .limit = dmac->handle + 0x0fff, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base); + if (ret) + return ret; + + ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = syncbuf + 0x0000, + .limit = syncbuf + 0x0fff, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM, + NV_DMA_IN_MEMORY_CLASS, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = 0, + .limit = pfb->ram.size - 1, + }, sizeof(struct nv_dma_class), &object); + if (ret) + return ret; + + if (nv_device(core)->card_type < NV_C0) + ret = nv50_dmac_create_fbdma(core, dmac->base.handle); + else + if (nv_device(core)->card_type < NV_D0) + ret = nvc0_dmac_create_fbdma(core, dmac->base.handle); + else + ret = nvd0_dmac_create_fbdma(core, dmac->base.handle); + return ret; +} + +struct nv50_mast { + struct nv50_dmac base; +}; + +struct nv50_curs { + struct nv50_pioc base; +}; + +struct nv50_sync { + struct nv50_dmac base; + u32 addr; + u32 data; +}; + +struct nv50_ovly { + struct nv50_dmac base; +}; + +struct nv50_oimm { + struct nv50_pioc base; +}; + +struct nv50_head { + struct nouveau_crtc base; + struct nv50_curs curs; + struct nv50_sync sync; + struct nv50_ovly ovly; + struct nv50_oimm oimm; +}; + +#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c)) +#define nv50_curs(c) (&nv50_head(c)->curs) +#define nv50_sync(c) (&nv50_head(c)->sync) +#define nv50_ovly(c) (&nv50_head(c)->ovly) +#define nv50_oimm(c) (&nv50_head(c)->oimm) +#define nv50_chan(c) (&(c)->base.base) +#define nv50_vers(c) nv_mclass(nv50_chan(c)->user) + +struct nv50_disp { + struct nouveau_object *core; + struct nv50_mast mast; + + u32 modeset; + + struct nouveau_bo *sync; +}; + +static struct nv50_disp * +nv50_disp(struct drm_device *dev) +{ + return nouveau_display(dev)->priv; +} + +#define nv50_mast(d) (&nv50_disp(d)->mast) + +static struct drm_crtc * +nv50_display_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; +} + +/****************************************************************************** + * EVO channel helpers + *****************************************************************************/ +static u32 * +evo_wait(void *evoc, int nr) +{ + struct nv50_dmac *dmac = evoc; + u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; + + mutex_lock(&dmac->lock); + if (put + nr >= (PAGE_SIZE / 4) - 8) { + dmac->ptr[put] = 0x20000000; + + nv_wo32(dmac->base.user, 0x0000, 0x00000000); + if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { + 
mutex_unlock(&dmac->lock); + NV_ERROR(dmac->base.user, "channel stalled\n"); + return NULL; + } + + put = 0; + } + + return dmac->ptr + put; +} + +static void +evo_kick(u32 *push, void *evoc) +{ + struct nv50_dmac *dmac = evoc; + nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); + mutex_unlock(&dmac->lock); +} + +#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) +#define evo_data(p,d) *((p)++) = (d) + +static bool +evo_sync_wait(void *data) +{ + if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000) + return true; + usleep_range(1, 2); + return false; +} + +static int +evo_sync(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nv50_disp *disp = nv50_disp(dev); + struct nv50_mast *mast = nv50_mast(dev); + u32 *push = evo_wait(mast, 8); + if (push) { + nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000); + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x80000000 | EVO_MAST_NTFY); + evo_mthd(push, 0x0080, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_kick(push, mast); + if (nv_wait_cb(device, evo_sync_wait, disp->sync)) + return 0; + } + + return -EBUSY; +} + +/****************************************************************************** + * Page flipping channel + *****************************************************************************/ +struct nouveau_bo * +nv50_display_crtc_sema(struct drm_device *dev, int crtc) +{ + return nv50_disp(dev)->sync; +} + +struct nv50_display_flip { + struct nv50_disp *disp; + struct nv50_sync *chan; +}; + +static bool +nv50_display_flip_wait(void *data) +{ + struct nv50_display_flip *flip = data; + if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == + flip->chan->data) + return true; + usleep_range(1, 2); + return false; +} + +void +nv50_display_flip_stop(struct drm_crtc *crtc) +{ + struct nouveau_device *device = nouveau_dev(crtc->dev); + struct nv50_display_flip flip = { + .disp = nv50_disp(crtc->dev), + .chan = nv50_sync(crtc), + }; + u32 *push; + + push = evo_wait(flip.chan, 8); + if (push) { + evo_mthd(push, 0x0084, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0094, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, flip.chan); + } + + nv_wait_cb(device, nv50_display_flip_wait, &flip); +} + +int +nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct nouveau_channel *chan, u32 swap_interval) +{ + struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_sync *sync = nv50_sync(crtc); + int head = nv_crtc->index, ret; + u32 *push; + + swap_interval <<= 4; + if (swap_interval == 0) + swap_interval |= 0x100; + if (chan == NULL) + evo_sync(crtc->dev); + + push = evo_wait(sync, 128); + if (unlikely(push == NULL)) + return -EBUSY; + + if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); + OUT_RING (chan, NvEvoSema0 + head); + OUT_RING (chan, sync->addr ^ 0x10); + BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); + OUT_RING (chan, sync->data + 1); + BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); + OUT_RING (chan, sync->addr); + OUT_RING (chan, sync->data); + } else + if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { + u64 addr = nv84_fence_crtc(chan, head) + sync->addr; + ret = RING_SPACE(chan, 12); + if 
(ret) + return ret; + + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); + OUT_RING (chan, chan->vram); + BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr ^ 0x10)); + OUT_RING (chan, lower_32_bits(addr ^ 0x10)); + OUT_RING (chan, sync->data + 1); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); + BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr)); + OUT_RING (chan, lower_32_bits(addr)); + OUT_RING (chan, sync->data); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); + } else + if (chan) { + u64 addr = nv84_fence_crtc(chan, head) + sync->addr; + ret = RING_SPACE(chan, 10); + if (ret) + return ret; + + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr ^ 0x10)); + OUT_RING (chan, lower_32_bits(addr ^ 0x10)); + OUT_RING (chan, sync->data + 1); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG | + NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD); + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(addr)); + OUT_RING (chan, lower_32_bits(addr)); + OUT_RING (chan, sync->data); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL | + NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD); + } + + if (chan) { + sync->addr ^= 0x10; + sync->data++; + FIRE_RING (chan); + } + + /* queue the flip */ + evo_mthd(push, 0x0100, 1); + evo_data(push, 0xfffe0000); + evo_mthd(push, 0x0084, 1); + evo_data(push, swap_interval); + if (!(swap_interval & 0x00000100)) { + evo_mthd(push, 0x00e0, 1); + evo_data(push, 0x40000000); + } + evo_mthd(push, 0x0088, 4); + evo_data(push, sync->addr); + evo_data(push, sync->data++); + evo_data(push, sync->data); + evo_data(push, NvEvoSync); + evo_mthd(push, 0x00a0, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x00c0, 1); + evo_data(push, nv_fb->r_dma); + evo_mthd(push, 0x0110, 2); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) { + evo_mthd(push, 0x0800, 5); + evo_data(push, nv_fb->nvbo->bo.offset >> 8); + evo_data(push, 0); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nv_fb->r_pitch); + evo_data(push, nv_fb->r_format); + } else { + evo_mthd(push, 0x0400, 5); + evo_data(push, nv_fb->nvbo->bo.offset >> 8); + evo_data(push, 0); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nv_fb->r_pitch); + evo_data(push, nv_fb->r_format); + } + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, sync); + return 0; +} + +/****************************************************************************** + * CRTC + *****************************************************************************/ +static int +nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + struct nouveau_connector *nv_connector; + struct drm_connector *connector; + u32 *push, mode = 0x00; + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + connector = &nv_connector->base; + if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) { + if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3) + mode = DITHERING_MODE_DYNAMIC2X2; + } else { + mode = nv_connector->dithering_mode; + } + + if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) { + if (connector->display_info.bpc >= 8) + mode |= DITHERING_DEPTH_8BPC; + } else { + mode |= nv_connector->dithering_depth; + } + + push = 
evo_wait(mast, 4); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); + evo_data(push, mode); + } else + if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); + evo_data(push, mode); + } else { + evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1); + evo_data(push, mode); + } + + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } + + return 0; +} + +static int +nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + struct drm_display_mode *omode, *umode = &nv_crtc->base.mode; + struct drm_crtc *crtc = &nv_crtc->base; + struct nouveau_connector *nv_connector; + int mode = DRM_MODE_SCALE_NONE; + u32 oX, oY, *push; + + /* start off at the resolution we programmed the crtc for, this + * effectively handles NONE/FULL scaling + */ + nv_connector = nouveau_crtc_connector_get(nv_crtc); + if (nv_connector && nv_connector->native_mode) + mode = nv_connector->scaling_mode; + + if (mode != DRM_MODE_SCALE_NONE) + omode = nv_connector->native_mode; + else + omode = umode; + + oX = omode->hdisplay; + oY = omode->vdisplay; + if (omode->flags & DRM_MODE_FLAG_DBLSCAN) + oY *= 2; + + /* add overscan compensation if necessary, will keep the aspect + * ratio the same as the backend mode unless overridden by the + * user setting both hborder and vborder properties. + */ + if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON || + (nv_connector->underscan == UNDERSCAN_AUTO && + nv_connector->edid && + drm_detect_hdmi_monitor(nv_connector->edid)))) { + u32 bX = nv_connector->underscan_hborder; + u32 bY = nv_connector->underscan_vborder; + u32 aspect = (oY << 19) / oX; + + if (bX) { + oX -= (bX * 2); + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } else { + oX -= (oX >> 4) + 32; + if (bY) oY -= (bY * 2); + else oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + } + + /* handle CENTER/ASPECT scaling, taking into account the areas + * removed already for overscan compensation + */ + switch (mode) { + case DRM_MODE_SCALE_CENTER: + oX = min((u32)umode->hdisplay, oX); + oY = min((u32)umode->vdisplay, oY); + /* fall-through */ + case DRM_MODE_SCALE_ASPECT: + if (oY < oX) { + u32 aspect = (umode->hdisplay << 19) / umode->vdisplay; + oX = ((oY * aspect) + (aspect / 2)) >> 19; + } else { + u32 aspect = (umode->vdisplay << 19) / umode->hdisplay; + oY = ((oX * aspect) + (aspect / 2)) >> 19; + } + break; + default: + break; + } + + push = evo_wait(mast, 8); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + /*XXX: SCALE_CTRL_ACTIVE??? 
*/ + evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1); + evo_data(push, umode->vdisplay << 16 | umode->hdisplay); + } else { + evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_data(push, (oY << 16) | oX); + evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1); + evo_data(push, umode->vdisplay << 16 | umode->hdisplay); + } + + evo_kick(push, mast); + + if (update) { + nv50_display_flip_stop(crtc); + nv50_display_flip_next(crtc, crtc->fb, NULL, 1); + } + } + + return 0; +} + +static int +nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + u32 *push, hue, vib; + int adj; + + adj = (nv_crtc->color_vibrance > 0) ? 50 : 0; + vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff; + hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff; + + push = evo_wait(mast, 16); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); + evo_data(push, (hue << 20) | (vib << 8)); + } else { + evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1); + evo_data(push, (hue << 20) | (vib << 8)); + } + + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } + + return 0; +} + +static int +nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, + int x, int y, bool update) +{ + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb); + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + u32 *push; + + push = evo_wait(mast, 16); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); + evo_data(push, nvfb->nvbo->bo.offset >> 8); + evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nvfb->r_pitch); + evo_data(push, nvfb->r_format); + evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); + evo_data(push, (y << 16) | x); + if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) { + evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); + evo_data(push, nvfb->r_dma); + } + } else { + evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); + evo_data(push, nvfb->nvbo->bo.offset >> 8); + evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4); + evo_data(push, (fb->height << 16) | fb->width); + evo_data(push, nvfb->r_pitch); + evo_data(push, nvfb->r_format); + evo_data(push, nvfb->r_dma); + evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); + evo_data(push, (y << 16) | x); + } + + if (update) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } + + nv_crtc->fb.tile_flags = nvfb->r_dma; + return 0; +} + +static void +nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + u32 *push = evo_wait(mast, 16); + if (push) { + if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); + evo_data(push, 0x85000000); + evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); + } else + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); + 
evo_data(push, 0x85000000); + evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); + evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); + evo_data(push, NvEvoVRAM); + } else { + evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); + evo_data(push, 0x85000000); + evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); + evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); + evo_data(push, NvEvoVRAM); + } + evo_kick(push, mast); + } +} + +static void +nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + u32 *push = evo_wait(mast, 16); + if (push) { + if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x05000000); + } else + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x05000000); + evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x05000000); + evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } +} + +static void +nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update) +{ + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + + if (show) + nv50_crtc_cursor_show(nv_crtc); + else + nv50_crtc_cursor_hide(nv_crtc); + + if (update) { + u32 *push = evo_wait(mast, 2); + if (push) { + evo_mthd(push, 0x0080, 1); + evo_data(push, 0x00000000); + evo_kick(push, mast); + } + } +} + +static void +nv50_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +} + +static void +nv50_crtc_prepare(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_mast *mast = nv50_mast(crtc->dev); + u32 *push; + + nv50_display_flip_stop(crtc); + + push = evo_wait(mast, 2); + if (push) { + if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x40000000); + } else + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x40000000); + evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x03000000); + evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); + } + + evo_kick(push, mast); + } + + nv50_crtc_cursor_show_hide(nv_crtc, false, false); +} + +static void +nv50_crtc_commit(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_mast *mast = nv50_mast(crtc->dev); + u32 *push; + + push = evo_wait(mast, 32); + if (push) { + if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); + evo_data(push, NvEvoVRAM_LP); + evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); + evo_data(push, 0xc0000000); + evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); + } else + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); + evo_data(push, nv_crtc->fb.tile_flags); + evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); + evo_data(push, 
0xc0000000); + evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); + evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); + evo_data(push, NvEvoVRAM); + } else { + evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); + evo_data(push, nv_crtc->fb.tile_flags); + evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); + evo_data(push, 0x83000000); + evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); + evo_data(push, 0x00000000); + evo_data(push, 0x00000000); + evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); + evo_data(push, NvEvoVRAM); + evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); + evo_data(push, 0xffffff00); + } + + evo_kick(push, mast); + } + + nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true); + nv50_display_flip_next(crtc, crtc->fb, NULL, 1); +} + +static bool +nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) +{ + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); + int ret; + + ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (old_fb) { + nvfb = nouveau_framebuffer(old_fb); + nouveau_bo_unpin(nvfb->nvbo); + } + + return 0; +} + +static int +nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, + struct drm_display_mode *mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nv50_mast *mast = nv50_mast(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nouveau_connector *nv_connector; + u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1; + u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1; + u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks; + u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks; + u32 vblan2e = 0, vblan2s = 1; + u32 *push; + int ret; + + hactive = mode->htotal; + hsynce = mode->hsync_end - mode->hsync_start - 1; + hbackp = mode->htotal - mode->hsync_end; + hblanke = hsynce + hbackp; + hfrontp = mode->hsync_start - mode->hdisplay; + hblanks = mode->htotal - hfrontp - 1; + + vactive = mode->vtotal * vscan / ilace; + vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1; + vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; + vblanke = vsynce + vbackp; + vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + vblanks = vactive - vfrontp - 1; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + vblan2e = vactive + vsynce + vbackp; + vblan2s = vblan2e + (mode->vdisplay * vscan / ilace); + vactive = (vactive * 2) + 1; + } + + ret = nv50_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + + push = evo_wait(mast, 64); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); + evo_data(push, 0x00800000 | mode->clock); + evo_data(push, (ilace == 2) ? 
2 : 0); + evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6); + evo_data(push, 0x00000000); + evo_data(push, (vactive << 16) | hactive); + evo_data(push, ( vsynce << 16) | hsynce); + evo_data(push, (vblanke << 16) | hblanke); + evo_data(push, (vblanks << 16) | hblanks); + evo_data(push, (vblan2e << 16) | vblan2s); + evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1); + evo_data(push, 0x00000000); + evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); + evo_data(push, 0x00000311); + evo_data(push, 0x00000100); + } else { + evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6); + evo_data(push, 0x00000000); + evo_data(push, (vactive << 16) | hactive); + evo_data(push, ( vsynce << 16) | hsynce); + evo_data(push, (vblanke << 16) | hblanke); + evo_data(push, (vblanks << 16) | hblanks); + evo_data(push, (vblan2e << 16) | vblan2s); + evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1); + evo_data(push, 0x00000000); /* ??? */ + evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3); + evo_data(push, mode->clock * 1000); + evo_data(push, 0x00200000); /* ??? */ + evo_data(push, mode->clock * 1000); + evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2); + evo_data(push, 0x00000311); + evo_data(push, 0x00000100); + } + + evo_kick(push, mast); + } + + nv_connector = nouveau_crtc_connector_get(nv_crtc); + nv50_crtc_set_dither(nv_crtc, false); + nv50_crtc_set_scale(nv_crtc, false); + nv50_crtc_set_color_vibrance(nv_crtc, false); + nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false); + return 0; +} + +static int +nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct nouveau_drm *drm = nouveau_drm(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + if (!crtc->fb) { + NV_DEBUG(drm, "No FB bound\n"); + return 0; + } + + ret = nv50_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + + nv50_display_flip_stop(crtc); + nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true); + nv50_display_flip_next(crtc, crtc->fb, NULL, 1); + return 0; +} + +static int +nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic state) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + nv50_display_flip_stop(crtc); + nv50_crtc_set_image(nv_crtc, fb, x, y, true); + return 0; +} + +static void +nv50_crtc_lut_load(struct drm_crtc *crtc) +{ + struct nv50_disp *disp = nv50_disp(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); + int i; + + for (i = 0; i < 256; i++) { + u16 r = nv_crtc->lut.r[i] >> 2; + u16 g = nv_crtc->lut.g[i] >> 2; + u16 b = nv_crtc->lut.b[i] >> 2; + + if (nv_mclass(disp->core) < NVD0_DISP_CLASS) { + writew(r + 0x0000, lut + (i * 0x08) + 0); + writew(g + 0x0000, lut + (i * 0x08) + 2); + writew(b + 0x0000, lut + (i * 0x08) + 4); + } else { + writew(r + 0x6000, lut + (i * 0x20) + 0); + writew(g + 0x6000, lut + (i * 0x20) + 2); + writew(b + 0x6000, lut + (i * 0x20) + 4); + } + } +} + +static int +nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_gem_object *gem; + struct nouveau_bo *nvbo; + bool visible = (handle != 0); + int i, ret = 0; + + if (visible) { + if (width != 64 || height != 64) + return -EINVAL; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (unlikely(!gem)) + return -ENOENT; + nvbo 
= nouveau_gem_object(gem); + + ret = nouveau_bo_map(nvbo); + if (ret == 0) { + for (i = 0; i < 64 * 64; i++) { + u32 v = nouveau_bo_rd32(nvbo, i); + nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v); + } + nouveau_bo_unmap(nvbo); + } + + drm_gem_object_unreference_unlocked(gem); + } + + if (visible != nv_crtc->cursor.visible) { + nv50_crtc_cursor_show_hide(nv_crtc, visible, true); + nv_crtc->cursor.visible = visible; + } + + return ret; +} + +static int +nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct nv50_curs *curs = nv50_curs(crtc); + struct nv50_chan *chan = nv50_chan(curs); + nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff)); + nv_wo32(chan->user, 0x0080, 0x00000000); + return 0; +} + +static void +nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 end = max(start + size, (u32)256); + u32 i; + + for (i = start; i < end; i++) { + nv_crtc->lut.r[i] = r[i]; + nv_crtc->lut.g[i] = g[i]; + nv_crtc->lut.b[i] = b[i]; + } + + nv50_crtc_lut_load(crtc); +} + +static void +nv50_crtc_destroy(struct drm_crtc *crtc) +{ + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_disp *disp = nv50_disp(crtc->dev); + struct nv50_head *head = nv50_head(crtc); + nv50_dmac_destroy(disp->core, &head->ovly.base); + nv50_pioc_destroy(disp->core, &head->oimm.base); + nv50_dmac_destroy(disp->core, &head->sync.base); + nv50_pioc_destroy(disp->core, &head->curs.base); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); + if (nv_crtc->cursor.nvbo) + nouveau_bo_unpin(nv_crtc->cursor.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + nouveau_bo_unmap(nv_crtc->lut.nvbo); + if (nv_crtc->lut.nvbo) + nouveau_bo_unpin(nv_crtc->lut.nvbo); + nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { + .dpms = nv50_crtc_dpms, + .prepare = nv50_crtc_prepare, + .commit = nv50_crtc_commit, + .mode_fixup = nv50_crtc_mode_fixup, + .mode_set = nv50_crtc_mode_set, + .mode_set_base = nv50_crtc_mode_set_base, + .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, + .load_lut = nv50_crtc_lut_load, +}; + +static const struct drm_crtc_funcs nv50_crtc_func = { + .cursor_set = nv50_crtc_cursor_set, + .cursor_move = nv50_crtc_cursor_move, + .gamma_set = nv50_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = nv50_crtc_destroy, + .page_flip = nouveau_crtc_page_flip, +}; + +static void +nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) +{ +} + +static void +nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) +{ +} + +static int +nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) +{ + struct nv50_disp *disp = nv50_disp(dev); + struct nv50_head *head; + struct drm_crtc *crtc; + int ret, i; + + head = kzalloc(sizeof(*head), GFP_KERNEL); + if (!head) + return -ENOMEM; + + head->base.index = index; + head->base.set_dither = nv50_crtc_set_dither; + head->base.set_scale = nv50_crtc_set_scale; + head->base.set_color_vibrance = nv50_crtc_set_color_vibrance; + head->base.color_vibrance = 50; + head->base.vibrant_hue = 0; + head->base.cursor.set_offset = nv50_cursor_set_offset; + head->base.cursor.set_pos = nv50_cursor_set_pos; + for (i = 0; i < 256; i++) { + head->base.lut.r[i] = i << 8; + head->base.lut.g[i] = i << 8; + head->base.lut.b[i] = i << 8; + } + + crtc = &head->base.base; + drm_crtc_init(dev, crtc, &nv50_crtc_func); + 
drm_crtc_helper_add(crtc, &nv50_crtc_hfunc); + drm_mode_crtc_set_gamma_size(crtc, 256); + + ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &head->base.lut.nvbo); + if (!ret) { + ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) { + ret = nouveau_bo_map(head->base.lut.nvbo); + if (ret) + nouveau_bo_unpin(head->base.lut.nvbo); + } + if (ret) + nouveau_bo_ref(NULL, &head->base.lut.nvbo); + } + + if (ret) + goto out; + + nv50_crtc_lut_load(crtc); + + /* allocate cursor resources */ + ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index, + &(struct nv50_display_curs_class) { + .head = index, + }, sizeof(struct nv50_display_curs_class), + &head->curs.base); + if (ret) + goto out; + + ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &head->base.cursor.nvbo); + if (!ret) { + ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM); + if (!ret) { + ret = nouveau_bo_map(head->base.cursor.nvbo); + if (ret) + nouveau_bo_unpin(head->base.lut.nvbo); + } + if (ret) + nouveau_bo_ref(NULL, &head->base.cursor.nvbo); + } + + if (ret) + goto out; + + /* allocate page flip / sync resources */ + ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index, + &(struct nv50_display_sync_class) { + .pushbuf = EVO_PUSH_HANDLE(SYNC, index), + .head = index, + }, sizeof(struct nv50_display_sync_class), + disp->sync->bo.offset, &head->sync.base); + if (ret) + goto out; + + head->sync.addr = EVO_FLIP_SEM0(index); + head->sync.data = 0x00000000; + + /* allocate overlay resources */ + ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, + &(struct nv50_display_oimm_class) { + .head = index, + }, sizeof(struct nv50_display_oimm_class), + &head->oimm.base); + if (ret) + goto out; + + ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index, + &(struct nv50_display_ovly_class) { + .pushbuf = EVO_PUSH_HANDLE(OVLY, index), + .head = index, + }, sizeof(struct nv50_display_ovly_class), + disp->sync->bo.offset, &head->ovly.base); + if (ret) + goto out; + +out: + if (ret) + nv50_crtc_destroy(crtc); + return ret; +} + +/****************************************************************************** + * DAC + *****************************************************************************/ +static void +nv50_dac_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_disp *disp = nv50_disp(encoder->dev); + int or = nv_encoder->or; + u32 dpms_ctrl; + + dpms_ctrl = 0x00000000; + if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) + dpms_ctrl |= 0x00000001; + if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) + dpms_ctrl |= 0x00000004; + + nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl); +} + +static bool +nv50_dac_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + return true; +} + +static void +nv50_dac_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct 
drm_display_mode *adjusted_mode) +{ + struct nv50_mast *mast = nv50_mast(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + u32 *push; + + nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); + + push = evo_wait(mast, 8); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + u32 syncs = 0x00000000; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000001; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000002; + + evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2); + evo_data(push, 1 << nv_crtc->index); + evo_data(push, syncs); + } else { + u32 magic = 0x31ec6000 | (nv_crtc->index << 25); + u32 syncs = 0x00000001; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000008; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000010; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + magic |= 0x00000001; + + evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); + evo_data(push, syncs); + evo_data(push, magic); + evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1); + evo_data(push, 1 << nv_crtc->index); + } + + evo_kick(push, mast); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nv50_dac_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_mast *mast = nv50_mast(encoder->dev); + const int or = nv_encoder->or; + u32 *push; + + if (nv_encoder->crtc) { + nv50_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(mast, 4); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0400 + (or * 0x080), 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0180 + (or * 0x020), 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } + } + + nv_encoder->crtc = NULL; +} + +static enum drm_connector_status +nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct nv50_disp *disp = nv50_disp(encoder->dev); + int ret, or = nouveau_encoder(encoder)->or; + u32 load = nouveau_drm(encoder->dev)->vbios.dactestval; + if (load == 0) + load = 340; + + ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); + if (ret || load != 7) + return connector_status_disconnected; + + return connector_status_connected; +} + +static void +nv50_dac_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nv50_dac_hfunc = { + .dpms = nv50_dac_dpms, + .mode_fixup = nv50_dac_mode_fixup, + .prepare = nv50_dac_disconnect, + .commit = nv50_dac_commit, + .mode_set = nv50_dac_mode_set, + .disable = nv50_dac_disconnect, + .get_crtc = nv50_display_crtc_get, + .detect = nv50_dac_detect +}; + +static const struct drm_encoder_funcs nv50_dac_func = { + .destroy = nv50_dac_destroy, +}; + +static int +nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + int type = DRM_MODE_ENCODER_DAC; + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index); + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(connector->dev, encoder, &nv50_dac_func, 
type); + drm_encoder_helper_add(encoder, &nv50_dac_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + +/****************************************************************************** + * Audio + *****************************************************************************/ +static void +nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + struct nv50_disp *disp = nv50_disp(encoder->dev); + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_monitor_audio(nv_connector->edid)) + return; + + drm_edid_to_eld(&nv_connector->base, nv_connector->edid); + + nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, + nv_connector->base.eld, + nv_connector->base.eld[2] * 4); +} + +static void +nv50_audio_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_disp *disp = nv50_disp(encoder->dev); + + nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0); +} + +/****************************************************************************** + * HDMI + *****************************************************************************/ +static void +nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + struct nv50_disp *disp = nv50_disp(encoder->dev); + const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; + u32 rekey = 56; /* binary driver, and tegra constant */ + u32 max_ac_packet; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!drm_detect_hdmi_monitor(nv_connector->edid)) + return; + + max_ac_packet = mode->htotal - mode->hdisplay; + max_ac_packet -= rekey; + max_ac_packet -= 18; /* constant from tegra */ + max_ac_packet /= 32; + + nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, + NV84_DISP_SOR_HDMI_PWR_STATE_ON | + (max_ac_packet << 16) | rekey); + + nv50_audio_mode_set(encoder, mode); +} + +static void +nv50_hdmi_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); + struct nv50_disp *disp = nv50_disp(encoder->dev); + const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; + + nv50_audio_disconnect(encoder); + + nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000); +} + +/****************************************************************************** + * SOR + *****************************************************************************/ +static void +nv50_sor_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct nv50_disp *disp = nv50_disp(dev); + struct drm_encoder *partner; + int or = nv_encoder->or; + + nv_encoder->last_dpms = mode; + + list_for_each_entry(partner, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_partner = nouveau_encoder(partner); + + if (partner->encoder_type != DRM_MODE_ENCODER_TMDS) + continue; + + if (nv_partner != nv_encoder && + nv_partner->dcb->or == nv_encoder->dcb->or) { + if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) + return; + break; + } + } + + nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON)); 
+} + +static bool +nv50_sor_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + return true; +} + +static void +nv50_sor_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_mast *mast = nv50_mast(encoder->dev); + const int or = nv_encoder->or; + u32 *push; + + if (nv_encoder->crtc) { + nv50_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(mast, 4); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0600 + (or * 0x40), 1); + evo_data(push, 0x00000000); + } else { + evo_mthd(push, 0x0200 + (or * 0x20), 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } + + nv50_hdmi_disconnect(encoder); + } + + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + nv_encoder->crtc = NULL; +} + +static void +nv50_sor_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, + struct drm_display_mode *mode) +{ + struct nv50_disp *disp = nv50_disp(encoder->dev); + struct nv50_mast *mast = nv50_mast(encoder->dev); + struct drm_device *dev = encoder->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + struct nvbios *bios = &drm->vbios; + u32 *push, lvds = 0; + u8 owner = 1 << nv_crtc->index; + u8 proto = 0xf; + u8 depth = 0x0; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_TMDS: + if (nv_encoder->dcb->sorconf.link & 1) { + if (mode->clock < 165000) + proto = 0x1; + else + proto = 0x5; + } else { + proto = 0x2; + } + + nv50_hdmi_mode_set(encoder, mode); + break; + case DCB_OUTPUT_LVDS: + proto = 0x0; + + if (bios->fp_no_ddc) { + if (bios->fp.dual_link) + lvds |= 0x0100; + if (bios->fp.if_is_24bit) + lvds |= 0x0200; + } else { + if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { + if (((u8 *)nv_connector->edid)[121] == 2) + lvds |= 0x0100; + } else + if (mode->clock >= bios->fp.duallink_transition_clk) { + lvds |= 0x0100; + } + + if (lvds & 0x0100) { + if (bios->fp.strapless_is_24bit & 2) + lvds |= 0x0200; + } else { + if (bios->fp.strapless_is_24bit & 1) + lvds |= 0x0200; + } + + if (nv_connector->base.display_info.bpc == 8) + lvds |= 0x0200; + } + + nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds); + break; + case DCB_OUTPUT_DP: + if (nv_connector->base.display_info.bpc == 6) { + nv_encoder->dp.datarate = mode->clock * 18 / 8; + depth = 0x2; + } else + if (nv_connector->base.display_info.bpc == 8) { + nv_encoder->dp.datarate = mode->clock * 24 / 8; + depth = 0x5; + } else { + nv_encoder->dp.datarate = mode->clock * 30 / 8; + depth = 0x6; + } + + if (nv_encoder->dcb->sorconf.link & 1) + proto = 0x8; + else + proto = 0x9; + break; + default: + BUG_ON(1); + break; + } + + nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); + + push = evo_wait(nv50_mast(dev), 8); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_CLASS) { 
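+ /* pre-NVD0 EVO appears to pack the owning crtc mask, protocol and
+ * depth into a single SOR control word written via method
+ * 0x0600 + or * 0x040; the sync polarity flags are OR'd in below
+ */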
+ u32 ctrl = (depth << 16) | (proto << 8) | owner; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl |= 0x00001000; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl |= 0x00002000; + evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1); + evo_data(push, ctrl); + } else { + u32 magic = 0x31ec6000 | (nv_crtc->index << 25); + u32 syncs = 0x00000001; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + syncs |= 0x00000008; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + syncs |= 0x00000010; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + magic |= 0x00000001; + + evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2); + evo_data(push, syncs | (depth << 6)); + evo_data(push, magic); + evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1); + evo_data(push, owner | (proto << 8)); + } + + evo_kick(push, mast); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nv50_sor_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nv50_sor_hfunc = { + .dpms = nv50_sor_dpms, + .mode_fixup = nv50_sor_mode_fixup, + .prepare = nv50_sor_disconnect, + .commit = nv50_sor_commit, + .mode_set = nv50_sor_mode_set, + .disable = nv50_sor_disconnect, + .get_crtc = nv50_display_crtc_get, +}; + +static const struct drm_encoder_funcs nv50_sor_func = { + .destroy = nv50_sor_destroy, +}; + +static int +nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + int type; + + switch (dcbe->type) { + case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break; + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_DP: + default: + type = DRM_MODE_ENCODER_TMDS; + break; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index); + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type); + drm_encoder_helper_add(encoder, &nv50_sor_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + +/****************************************************************************** + * PIOR + *****************************************************************************/ + +static void +nv50_pior_dpms(struct drm_encoder *encoder, int mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_disp *disp = nv50_disp(encoder->dev); + u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or; + u32 ctrl = (mode == DRM_MODE_DPMS_ON); + nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl); +} + +static bool +nv50_pior_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (nv_connector && nv_connector->native_mode) { + if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { + int id = adjusted_mode->base.id; + *adjusted_mode = *nv_connector->native_mode; + adjusted_mode->base.id = id; + } + } + + adjusted_mode->clock *= 2; + return true; +} + +static void 
+nv50_pior_commit(struct drm_encoder *encoder) +{ +} + +static void +nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct nv50_mast *mast = nv50_mast(encoder->dev); + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct nouveau_connector *nv_connector; + u8 owner = 1 << nv_crtc->index; + u8 proto, depth; + u32 *push; + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + switch (nv_connector->base.display_info.bpc) { + case 10: depth = 0x6; break; + case 8: depth = 0x5; break; + case 6: depth = 0x2; break; + default: depth = 0x0; break; + } + + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_DP: + proto = 0x0; + break; + default: + BUG_ON(1); + break; + } + + nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON); + + push = evo_wait(mast, 8); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + u32 ctrl = (depth << 16) | (proto << 8) | owner; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl |= 0x00001000; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl |= 0x00002000; + evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1); + evo_data(push, ctrl); + } + + evo_kick(push, mast); + } + + nv_encoder->crtc = encoder->crtc; +} + +static void +nv50_pior_disconnect(struct drm_encoder *encoder) +{ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_mast *mast = nv50_mast(encoder->dev); + const int or = nv_encoder->or; + u32 *push; + + if (nv_encoder->crtc) { + nv50_crtc_prepare(nv_encoder->crtc); + + push = evo_wait(mast, 4); + if (push) { + if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + evo_mthd(push, 0x0700 + (or * 0x040), 1); + evo_data(push, 0x00000000); + } + evo_kick(push, mast); + } + } + + nv_encoder->crtc = NULL; +} + +static void +nv50_pior_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_helper_funcs nv50_pior_hfunc = { + .dpms = nv50_pior_dpms, + .mode_fixup = nv50_pior_mode_fixup, + .prepare = nv50_pior_disconnect, + .commit = nv50_pior_commit, + .mode_set = nv50_pior_mode_set, + .disable = nv50_pior_disconnect, + .get_crtc = nv50_display_crtc_get, +}; + +static const struct drm_encoder_funcs nv50_pior_func = { + .destroy = nv50_pior_destroy, +}; + +static int +nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c_port *ddc = NULL; + struct nouveau_encoder *nv_encoder; + struct drm_encoder *encoder; + int type; + + switch (dcbe->type) { + case DCB_OUTPUT_TMDS: + ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev)); + type = DRM_MODE_ENCODER_TMDS; + break; + case DCB_OUTPUT_DP: + ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev)); + type = DRM_MODE_ENCODER_TMDS; + break; + default: + return -ENODEV; + } + + nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); + if (!nv_encoder) + return -ENOMEM; + nv_encoder->dcb = dcbe; + nv_encoder->or = ffs(dcbe->or) - 1; + nv_encoder->i2c = ddc; + + encoder = to_drm_encoder(nv_encoder); + encoder->possible_crtcs = dcbe->heads; + encoder->possible_clones = 0; + drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type); + drm_encoder_helper_add(encoder, &nv50_pior_hfunc); + + drm_mode_connector_attach_encoder(connector, encoder); + return 0; +} + 
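+/* Note: the DAC, SOR and PIOR creation paths above all follow the same
+ * shape: allocate a nouveau_encoder, derive the hardware OR index from the
+ * DCB output mask, look up the matching i2c/aux port, then register the
+ * DRM encoder and attach it to its connector.  Roughly:
+ *
+ *	nv_encoder->or = ffs(dcbe->or) - 1;	/* e.g. mask 0x4 -> OR 2 */
+ *	drm_encoder_init(dev, encoder, funcs, type);
+ *	drm_encoder_helper_add(encoder, helper_funcs);
+ *	drm_mode_connector_attach_encoder(connector, encoder);
+ *
+ * (illustrative sketch only; names as used in the functions above)
+ */
+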
+/****************************************************************************** + * Init + *****************************************************************************/ +void +nv50_display_fini(struct drm_device *dev) +{ +} + +int +nv50_display_init(struct drm_device *dev) +{ + struct nv50_disp *disp = nv50_disp(dev); + struct drm_crtc *crtc; + u32 *push; + + push = evo_wait(nv50_mast(dev), 32); + if (!push) + return -EBUSY; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nv50_sync *sync = nv50_sync(crtc); + nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data); + } + + evo_mthd(push, 0x0088, 1); + evo_data(push, NvEvoSync); + evo_kick(push, nv50_mast(dev)); + return 0; +} + +void +nv50_display_destroy(struct drm_device *dev) +{ + struct nv50_disp *disp = nv50_disp(dev); + + nv50_dmac_destroy(disp->core, &disp->mast.base); + + nouveau_bo_unmap(disp->sync); + if (disp->sync) + nouveau_bo_unpin(disp->sync); + nouveau_bo_ref(NULL, &disp->sync); + + nouveau_display(dev)->priv = NULL; + kfree(disp); +} + +int +nv50_display_create(struct drm_device *dev) +{ + static const u16 oclass[] = { + NVF0_DISP_CLASS, + NVE0_DISP_CLASS, + NVD0_DISP_CLASS, + NVA3_DISP_CLASS, + NV94_DISP_CLASS, + NVA0_DISP_CLASS, + NV84_DISP_CLASS, + NV50_DISP_CLASS, + }; + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct dcb_table *dcb = &drm->vbios.dcb; + struct drm_connector *connector, *tmp; + struct nv50_disp *disp; + struct dcb_output *dcbe; + int crtcs, ret, i; + + disp = kzalloc(sizeof(*disp), GFP_KERNEL); + if (!disp) + return -ENOMEM; + + nouveau_display(dev)->priv = disp; + nouveau_display(dev)->dtor = nv50_display_destroy; + nouveau_display(dev)->init = nv50_display_init; + nouveau_display(dev)->fini = nv50_display_fini; + + /* small shared memory area we use for notifiers and semaphores */ + ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &disp->sync); + if (!ret) { + ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM); + if (!ret) { + ret = nouveau_bo_map(disp->sync); + if (ret) + nouveau_bo_unpin(disp->sync); + } + if (ret) + nouveau_bo_ref(NULL, &disp->sync); + } + + if (ret) + goto out; + + /* attempt to allocate a supported evo display class */ + ret = -ENODEV; + for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) { + ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, + 0xd1500000, oclass[i], NULL, 0, + &disp->core); + } + + if (ret) + goto out; + + /* allocate master evo channel */ + ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, + &(struct nv50_display_mast_class) { + .pushbuf = EVO_PUSH_HANDLE(MAST, 0), + }, sizeof(struct nv50_display_mast_class), + disp->sync->bo.offset, &disp->mast.base); + if (ret) + goto out; + + /* create crtc objects to represent the hw heads */ + if (nv_mclass(disp->core) >= NVD0_DISP_CLASS) + crtcs = nv_rd32(device, 0x022448); + else + crtcs = 2; + + for (i = 0; i < crtcs; i++) { + ret = nv50_crtc_create(dev, disp->core, i); + if (ret) + goto out; + } + + /* create encoder/connector objects based on VBIOS DCB table */ + for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { + connector = nouveau_connector_create(dev, dcbe->connector); + if (IS_ERR(connector)) + continue; + + if (dcbe->location == DCB_LOC_ON_CHIP) { + switch (dcbe->type) { + case DCB_OUTPUT_TMDS: + case DCB_OUTPUT_LVDS: + case DCB_OUTPUT_DP: + ret = nv50_sor_create(connector, dcbe); + break; + case DCB_OUTPUT_ANALOG: + ret = nv50_dac_create(connector, dcbe); + break; + 
default: + ret = -ENODEV; + break; + } + } else { + ret = nv50_pior_create(connector, dcbe); + } + + if (ret) { + NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n", + dcbe->location, dcbe->type, + ffs(dcbe->or) - 1, ret); + ret = 0; + } + } + + /* cull any connectors we created that don't have an encoder */ + list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { + if (connector->encoder_ids[0]) + continue; + + NV_WARN(drm, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } + +out: + if (ret) + nv50_display_destroy(dev); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h new file mode 100644 index 0000000..70da347 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2008 Maarten Maathuis. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NV50_DISPLAY_H__ +#define __NV50_DISPLAY_H__ + +#include "nouveau_display.h" +#include "nouveau_crtc.h" +#include "nouveau_reg.h" + +int nv50_display_create(struct drm_device *); +void nv50_display_destroy(struct drm_device *); +int nv50_display_init(struct drm_device *); +void nv50_display_fini(struct drm_device *); + +void nv50_display_flip_stop(struct drm_crtc *); +int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *, + struct nouveau_channel *, u32 swap_interval); + +struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head); + +#endif /* __NV50_DISPLAY_H__ */ diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c new file mode 100644 index 0000000..52068a0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -0,0 +1,259 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fbcon.h" + +int +nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11); + if (ret) + return ret; + + if (rect->rop != ROP_COPY) { + BEGIN_NV04(chan, NvSub2D, 0x02ac, 1); + OUT_RING(chan, 1); + } + BEGIN_NV04(chan, NvSub2D, 0x0588, 1); + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) + OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]); + else + OUT_RING(chan, rect->color); + BEGIN_NV04(chan, NvSub2D, 0x0600, 4); + OUT_RING(chan, rect->dx); + OUT_RING(chan, rect->dy); + OUT_RING(chan, rect->dx + rect->width); + OUT_RING(chan, rect->dy + rect->height); + if (rect->rop != ROP_COPY) { + BEGIN_NV04(chan, NvSub2D, 0x02ac, 1); + OUT_RING(chan, 3); + } + FIRE_RING(chan); + return 0; +} + +int +nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + ret = RING_SPACE(chan, 12); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSub2D, 0x0110, 1); + OUT_RING(chan, 0); + BEGIN_NV04(chan, NvSub2D, 0x08b0, 4); + OUT_RING(chan, region->dx); + OUT_RING(chan, region->dy); + OUT_RING(chan, region->width); + OUT_RING(chan, region->height); + BEGIN_NV04(chan, NvSub2D, 0x08d0, 4); + OUT_RING(chan, 0); + OUT_RING(chan, region->sx); + OUT_RING(chan, 0); + OUT_RING(chan, region->sy); + FIRE_RING(chan); + return 0; +} + +int +nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); + uint32_t *palette = info->pseudo_palette; + int ret; + + if (image->depth != 1) + return -ENODEV; + + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + + width = ALIGN(image->width, 32); + dwords = (width * image->height) >> 5; + + BEGIN_NV04(chan, NvSub2D, 0x0814, 2); + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + OUT_RING(chan, palette[image->bg_color] | mask); + OUT_RING(chan, palette[image->fg_color] | mask); + } else { + OUT_RING(chan, image->bg_color); + OUT_RING(chan, image->fg_color); + } + BEGIN_NV04(chan, NvSub2D, 0x0838, 2); + OUT_RING(chan, image->width); + OUT_RING(chan, image->height); + BEGIN_NV04(chan, NvSub2D, 0x0850, 4); + OUT_RING(chan, 0); + OUT_RING(chan, image->dx); + OUT_RING(chan, 0); + OUT_RING(chan, image->dy); + + while (dwords) { + int push = dwords > 2047 ? 
2047 : dwords; + + ret = RING_SPACE(chan, push + 1); + if (ret) + return ret; + + dwords -= push; + + BEGIN_NI04(chan, NvSub2D, 0x0860, push); + OUT_RINGp(chan, data, push); + data += push; + } + + FIRE_RING(chan); + return 0; +} + +int +nv50_fbcon_accel_init(struct fb_info *info) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; + struct drm_device *dev = nfbdev->dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *chan = drm->channel; + struct nouveau_object *object; + int ret, format; + + switch (info->var.bits_per_pixel) { + case 8: + format = 0xf3; + break; + case 15: + format = 0xf8; + break; + case 16: + format = 0xe8; + break; + case 32: + switch (info->var.transp.length) { + case 0: /* depth 24 */ + case 8: /* depth 32, just use 24.. */ + format = 0xe6; + break; + case 2: /* depth 30 */ + format = 0xd1; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, + 0x502d, NULL, 0, &object); + if (ret) + return ret; + + ret = RING_SPACE(chan, 59); + if (ret) { + nouveau_fbcon_gpu_lockup(info); + return ret; + } + + BEGIN_NV04(chan, NvSub2D, 0x0000, 1); + OUT_RING(chan, Nv2D); + BEGIN_NV04(chan, NvSub2D, 0x0184, 3); + OUT_RING(chan, NvDmaFB); + OUT_RING(chan, NvDmaFB); + OUT_RING(chan, NvDmaFB); + BEGIN_NV04(chan, NvSub2D, 0x0290, 1); + OUT_RING(chan, 0); + BEGIN_NV04(chan, NvSub2D, 0x0888, 1); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x02ac, 1); + OUT_RING(chan, 3); + BEGIN_NV04(chan, NvSub2D, 0x02a0, 1); + OUT_RING(chan, 0x55); + BEGIN_NV04(chan, NvSub2D, 0x08c0, 4); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0580, 2); + OUT_RING(chan, 4); + OUT_RING(chan, format); + BEGIN_NV04(chan, NvSub2D, 0x02e8, 2); + OUT_RING(chan, 2); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0804, 1); + OUT_RING(chan, format); + BEGIN_NV04(chan, NvSub2D, 0x0800, 1); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0808, 3); + OUT_RING(chan, 0); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x081c, 1); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0840, 4); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + OUT_RING(chan, 0); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0200, 2); + OUT_RING(chan, format); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0214, 5); + OUT_RING(chan, info->fix.line_length); + OUT_RING(chan, info->var.xres_virtual); + OUT_RING(chan, info->var.yres_virtual); + OUT_RING(chan, upper_32_bits(fb->vma.offset)); + OUT_RING(chan, lower_32_bits(fb->vma.offset)); + BEGIN_NV04(chan, NvSub2D, 0x0230, 2); + OUT_RING(chan, format); + OUT_RING(chan, 1); + BEGIN_NV04(chan, NvSub2D, 0x0244, 5); + OUT_RING(chan, info->fix.line_length); + OUT_RING(chan, info->var.xres_virtual); + OUT_RING(chan, info->var.yres_virtual); + OUT_RING(chan, upper_32_bits(fb->vma.offset)); + OUT_RING(chan, lower_32_bits(fb->vma.offset)); + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c new file mode 100644 index 0000000..0ee3638 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -0,0 +1,123 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <core/object.h> +#include <core/class.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nv10_fence.h" + +#include "nv50_display.h" + +static int +nv50_fence_context_new(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->drm->dev; + struct nv10_fence_priv *priv = chan->drm->fence; + struct nv10_fence_chan *fctx; + struct ttm_mem_reg *mem = &priv->bo->bo.mem; + struct nouveau_object *object; + u32 start = mem->start * PAGE_SIZE; + u32 limit = start + mem->size - 1; + int ret, i; + + fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return -ENOMEM; + + nouveau_fence_context_new(&fctx->base); + fctx->base.emit = nv10_fence_emit; + fctx->base.read = nv10_fence_read; + fctx->base.sync = nv17_fence_sync; + + ret = nouveau_object_new(nv_object(chan->cli), chan->handle, + NvSema, 0x003d, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = start, + .limit = limit, + }, sizeof(struct nv_dma_class), + &object); + + /* dma objects for display sync channel semaphore blocks */ + for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); + u32 start = bo->bo.mem.start * PAGE_SIZE; + u32 limit = start + bo->bo.mem.size - 1; + + ret = nouveau_object_new(nv_object(chan->cli), chan->handle, + NvEvoSema0 + i, 0x003d, + &(struct nv_dma_class) { + .flags = NV_DMA_TARGET_VRAM | + NV_DMA_ACCESS_RDWR, + .start = start, + .limit = limit, + }, sizeof(struct nv_dma_class), + &object); + } + + if (ret) + nv10_fence_context_del(chan); + return ret; +} + +int +nv50_fence_create(struct nouveau_drm *drm) +{ + struct nv10_fence_priv *priv; + int ret = 0; + + priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base.dtor = nv10_fence_destroy; + priv->base.resume = nv17_fence_resume; + priv->base.context_new = nv50_fence_context_new; + priv->base.context_del = nv10_fence_context_del; + spin_lock_init(&priv->lock); + + ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, + 0, 0x0000, NULL, &priv->bo); + if (!ret) { + ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); + if (!ret) { + ret = nouveau_bo_map(priv->bo); + if (ret) + nouveau_bo_unpin(priv->bo); + } + if (ret) + nouveau_bo_ref(NULL, &priv->bo); + } + + if (ret) { + nv10_fence_destroy(drm); + return ret; + } + + 
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c new file mode 100644 index 0000000..69620e3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -0,0 +1,855 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> +#include "nouveau_drm.h" +#include "nouveau_bios.h" +#include "dispnv04/hw.h" +#include "nouveau_pm.h" +#include "nouveau_hwsq.h" + +#include "nv50_display.h" + +#include <subdev/bios/pll.h> +#include <subdev/clock.h> +#include <subdev/timer.h> +#include <subdev/fb.h> + +enum clk_src { + clk_src_crystal, + clk_src_href, + clk_src_hclk, + clk_src_hclkm3, + clk_src_hclkm3d2, + clk_src_host, + clk_src_nvclk, + clk_src_sclk, + clk_src_mclk, + clk_src_vdec, + clk_src_dom6 +}; + +static u32 read_clk(struct drm_device *, enum clk_src); + +static u32 +read_div(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + + switch (nv_device(drm->device)->chipset) { + case 0x50: /* it exists, but only has bit 31, not the dividers.. */ + case 0x84: + case 0x86: + case 0x98: + case 0xa0: + return nv_rd32(device, 0x004700); + case 0x92: + case 0x94: + case 0x96: + return nv_rd32(device, 0x004800); + default: + return 0x00000000; + } +} + +static u32 +read_pll_src(struct drm_device *dev, u32 base) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 coef, ref = read_clk(dev, clk_src_crystal); + u32 rsel = nv_rd32(device, 0x00e18c); + int P, N, M, id; + + switch (nv_device(drm->device)->chipset) { + case 0x50: + case 0xa0: + switch (base) { + case 0x4020: + case 0x4028: id = !!(rsel & 0x00000004); break; + case 0x4008: id = !!(rsel & 0x00000008); break; + case 0x4030: id = 0; break; + default: + NV_ERROR(drm, "ref: bad pll 0x%06x\n", base); + return 0; + } + + coef = nv_rd32(device, 0x00e81c + (id * 0x0c)); + ref *= (coef & 0x01000000) ? 
2 : 4; + P = (coef & 0x00070000) >> 16; + N = ((coef & 0x0000ff00) >> 8) + 1; + M = ((coef & 0x000000ff) >> 0) + 1; + break; + case 0x84: + case 0x86: + case 0x92: + coef = nv_rd32(device, 0x00e81c); + P = (coef & 0x00070000) >> 16; + N = (coef & 0x0000ff00) >> 8; + M = (coef & 0x000000ff) >> 0; + break; + case 0x94: + case 0x96: + case 0x98: + rsel = nv_rd32(device, 0x00c050); + switch (base) { + case 0x4020: rsel = (rsel & 0x00000003) >> 0; break; + case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break; + case 0x4028: rsel = (rsel & 0x00001800) >> 11; break; + case 0x4030: rsel = 3; break; + default: + NV_ERROR(drm, "ref: bad pll 0x%06x\n", base); + return 0; + } + + switch (rsel) { + case 0: id = 1; break; + case 1: return read_clk(dev, clk_src_crystal); + case 2: return read_clk(dev, clk_src_href); + case 3: id = 0; break; + } + + coef = nv_rd32(device, 0x00e81c + (id * 0x28)); + P = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7; + P += (coef & 0x00070000) >> 16; + N = (coef & 0x0000ff00) >> 8; + M = (coef & 0x000000ff) >> 0; + break; + default: + BUG_ON(1); + } + + if (M) + return (ref * N / M) >> P; + return 0; +} + +static u32 +read_pll_ref(struct drm_device *dev, u32 base) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 src, mast = nv_rd32(device, 0x00c040); + + switch (base) { + case 0x004028: + src = !!(mast & 0x00200000); + break; + case 0x004020: + src = !!(mast & 0x00400000); + break; + case 0x004008: + src = !!(mast & 0x00010000); + break; + case 0x004030: + src = !!(mast & 0x02000000); + break; + case 0x00e810: + return read_clk(dev, clk_src_crystal); + default: + NV_ERROR(drm, "bad pll 0x%06x\n", base); + return 0; + } + + if (src) + return read_clk(dev, clk_src_href); + return read_pll_src(dev, base); +} + +static u32 +read_pll(struct drm_device *dev, u32 base) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 mast = nv_rd32(device, 0x00c040); + u32 ctrl = nv_rd32(device, base + 0); + u32 coef = nv_rd32(device, base + 4); + u32 ref = read_pll_ref(dev, base); + u32 clk = 0; + int N1, N2, M1, M2; + + if (base == 0x004028 && (mast & 0x00100000)) { + /* wtf, appears to only disable post-divider on nva0 */ + if (nv_device(drm->device)->chipset != 0xa0) + return read_clk(dev, clk_src_dom6); + } + + N2 = (coef & 0xff000000) >> 24; + M2 = (coef & 0x00ff0000) >> 16; + N1 = (coef & 0x0000ff00) >> 8; + M1 = (coef & 0x000000ff); + if ((ctrl & 0x80000000) && M1) { + clk = ref * N1 / M1; + if ((ctrl & 0x40000100) == 0x40000000) { + if (M2) + clk = clk * N2 / M2; + else + clk = 0; + } + } + + return clk; +} + +static u32 +read_clk(struct drm_device *dev, enum clk_src src) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 mast = nv_rd32(device, 0x00c040); + u32 P = 0; + + switch (src) { + case clk_src_crystal: + return device->crystal; + case clk_src_href: + return 100000; /* PCIE reference clock */ + case clk_src_hclk: + return read_clk(dev, clk_src_href) * 27778 / 10000; + case clk_src_hclkm3: + return read_clk(dev, clk_src_hclk) * 3; + case clk_src_hclkm3d2: + return read_clk(dev, clk_src_hclk) * 3 / 2; + case clk_src_host: + switch (mast & 0x30000000) { + case 0x00000000: return read_clk(dev, clk_src_href); + case 0x10000000: break; + case 0x20000000: /* !0x50 */ + case 0x30000000: return read_clk(dev, clk_src_hclk); + } + break; + case clk_src_nvclk: + if (!(mast & 0x00100000)) + P = (nv_rd32(device, 0x004028) 
& 0x00070000) >> 16; + switch (mast & 0x00000003) { + case 0x00000000: return read_clk(dev, clk_src_crystal) >> P; + case 0x00000001: return read_clk(dev, clk_src_dom6); + case 0x00000002: return read_pll(dev, 0x004020) >> P; + case 0x00000003: return read_pll(dev, 0x004028) >> P; + } + break; + case clk_src_sclk: + P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16; + switch (mast & 0x00000030) { + case 0x00000000: + if (mast & 0x00000080) + return read_clk(dev, clk_src_host) >> P; + return read_clk(dev, clk_src_crystal) >> P; + case 0x00000010: break; + case 0x00000020: return read_pll(dev, 0x004028) >> P; + case 0x00000030: return read_pll(dev, 0x004020) >> P; + } + break; + case clk_src_mclk: + P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16; + if (nv_rd32(device, 0x004008) & 0x00000200) { + switch (mast & 0x0000c000) { + case 0x00000000: + return read_clk(dev, clk_src_crystal) >> P; + case 0x00008000: + case 0x0000c000: + return read_clk(dev, clk_src_href) >> P; + } + } else { + return read_pll(dev, 0x004008) >> P; + } + break; + case clk_src_vdec: + P = (read_div(dev) & 0x00000700) >> 8; + switch (nv_device(drm->device)->chipset) { + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa0: + switch (mast & 0x00000c00) { + case 0x00000000: + if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */ + return read_clk(dev, clk_src_nvclk) >> P; + return read_clk(dev, clk_src_crystal) >> P; + case 0x00000400: + return 0; + case 0x00000800: + if (mast & 0x01000000) + return read_pll(dev, 0x004028) >> P; + return read_pll(dev, 0x004030) >> P; + case 0x00000c00: + return read_clk(dev, clk_src_nvclk) >> P; + } + break; + case 0x98: + switch (mast & 0x00000c00) { + case 0x00000000: + return read_clk(dev, clk_src_nvclk) >> P; + case 0x00000400: + return 0; + case 0x00000800: + return read_clk(dev, clk_src_hclkm3d2) >> P; + case 0x00000c00: + return read_clk(dev, clk_src_mclk) >> P; + } + break; + } + break; + case clk_src_dom6: + switch (nv_device(drm->device)->chipset) { + case 0x50: + case 0xa0: + return read_pll(dev, 0x00e810) >> 2; + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + P = (read_div(dev) & 0x00000007) >> 0; + switch (mast & 0x0c000000) { + case 0x00000000: return read_clk(dev, clk_src_href); + case 0x04000000: break; + case 0x08000000: return read_clk(dev, clk_src_hclk); + case 0x0c000000: + return read_clk(dev, clk_src_hclkm3) >> P; + } + break; + default: + break; + } + default: + break; + } + + NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast); + return 0; +} + +int +nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + if (nv_device(drm->device)->chipset == 0xaa || + nv_device(drm->device)->chipset == 0xac) + return 0; + + perflvl->core = read_clk(dev, clk_src_nvclk); + perflvl->shader = read_clk(dev, clk_src_sclk); + perflvl->memory = read_clk(dev, clk_src_mclk); + if (nv_device(drm->device)->chipset != 0x50) { + perflvl->vdec = read_clk(dev, clk_src_vdec); + perflvl->dom6 = read_clk(dev, clk_src_dom6); + } + + return 0; +} + +struct nv50_pm_state { + struct nouveau_pm_level *perflvl; + struct hwsq_ucode eclk_hwsq; + struct hwsq_ucode mclk_hwsq; + u32 mscript; + u32 mmast; + u32 mctrl; + u32 mcoef; +}; + +static u32 +calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, + u32 clk, int *N1, int *M1, int *log2P) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct 
nouveau_clock *pclk = nouveau_clock(device); + struct nouveau_pll_vals coef; + int ret; + + ret = nvbios_pll_parse(bios, reg, pll); + if (ret) + return 0; + + pll->vco2.max_freq = 0; + pll->refclk = read_pll_ref(dev, reg); + if (!pll->refclk) + return 0; + + ret = pclk->pll_calc(pclk, pll, clk, &coef); + if (ret == 0) + return 0; + + *N1 = coef.N1; + *M1 = coef.M1; + *log2P = coef.log2P; + return ret; +} + +static inline u32 +calc_div(u32 src, u32 target, int *div) +{ + u32 clk0 = src, clk1 = src; + for (*div = 0; *div <= 7; (*div)++) { + if (clk0 <= target) { + clk1 = clk0 << (*div ? 1 : 0); + break; + } + clk0 >>= 1; + } + + if (target - clk0 <= clk1 - target) + return clk0; + (*div)--; + return clk1; +} + +static inline u32 +clk_same(u32 a, u32 b) +{ + return ((a / 1000) == (b / 1000)); +} + +static void +mclk_precharge(struct nouveau_mem_exec_func *exec) +{ + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + + hwsq_wr32(hwsq, 0x1002d4, 0x00000001); +} + +static void +mclk_refresh(struct nouveau_mem_exec_func *exec) +{ + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + + hwsq_wr32(hwsq, 0x1002d0, 0x00000001); +} + +static void +mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) +{ + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + + hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000); +} + +static void +mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable) +{ + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + + hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000); +} + +static void +mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec) +{ + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + + if (nsec > 1000) + hwsq_usec(hwsq, (nsec + 500) / 1000); +} + +static u32 +mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + if (mr <= 1) + return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4)); + if (mr <= 3) + return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4)); + return 0; +} + +static void +mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + + if (mr <= 1) { + if (pfb->ram.ranks > 1) + hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data); + hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data); + } else + if (mr <= 3) { + if (pfb->ram.ranks > 1) + hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data); + hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data); + } +} + +static void +mclk_clock_set(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nv50_pm_state *info = exec->priv; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + u32 ctrl = nv_rd32(device, 0x004008); + + info->mmast = nv_rd32(device, 0x00c040); + info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */ + info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */ + + hwsq_wr32(hwsq, 0xc040, info->mmast); + hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */ + if (info->mctrl & 0x80000000) + hwsq_wr32(hwsq, 0x400c, info->mcoef); + hwsq_wr32(hwsq, 0x4008, info->mctrl); +} + +static void +mclk_timing_set(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = 
nouveau_dev(exec->dev); + struct nv50_pm_state *info = exec->priv; + struct nouveau_pm_level *perflvl = info->perflvl; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + int i; + + for (i = 0; i < 9; i++) { + u32 reg = 0x100220 + (i * 4); + u32 val = nv_rd32(device, reg); + if (val != perflvl->timing.reg[i]) + hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]); + } +} + +static int +calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl, + struct nv50_pm_state *info) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */ + struct nouveau_mem_exec_func exec = { + .dev = dev, + .precharge = mclk_precharge, + .refresh = mclk_refresh, + .refresh_auto = mclk_refresh_auto, + .refresh_self = mclk_refresh_self, + .wait = mclk_wait, + .mrg = mclk_mrg, + .mrs = mclk_mrs, + .clock_set = mclk_clock_set, + .timing_set = mclk_timing_set, + .priv = info + }; + struct hwsq_ucode *hwsq = &info->mclk_hwsq; + struct nvbios_pll pll; + int N, M, P; + int ret; + + /* use pcie refclock if possible, otherwise use mpll */ + info->mctrl = nv_rd32(device, 0x004008); + info->mctrl &= ~0x81ff0200; + if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) { + info->mctrl |= 0x00000200 | (pll.bias_p << 19); + } else { + ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P); + if (ret == 0) + return -EINVAL; + + info->mctrl |= 0x80000000 | (P << 22) | (P << 16); + info->mctrl |= pll.bias_p << 19; + info->mcoef = (N << 8) | M; + } + + /* build the ucode which will reclock the memory for us */ + hwsq_init(hwsq); + if (crtc_mask) { + hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */ + hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */ + } + if (nv_device(drm->device)->chipset >= 0x92) + hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */ + hwsq_setf(hwsq, 0x10, 0); /* disable bus access */ + hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */ + + ret = nouveau_mem_exec(&exec, perflvl); + if (ret) + return ret; + + hwsq_setf(hwsq, 0x10, 1); /* enable bus access */ + hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */ + if (nv_device(drm->device)->chipset >= 0x92) + hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */ + hwsq_fini(hwsq); + return 0; +} + +void * +nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_pm_state *info; + struct hwsq_ucode *hwsq; + struct nvbios_pll pll; + u32 out, mast, divs, ctrl; + int clk, ret = -EINVAL; + int N, M, P1, P2; + + if (nv_device(drm->device)->chipset == 0xaa || + nv_device(drm->device)->chipset == 0xac) + return ERR_PTR(-ENODEV); + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + info->perflvl = perflvl; + + /* memory: build hwsq ucode which we'll use to reclock memory. + * use pcie refclock if possible, otherwise use mpll */ + info->mclk_hwsq.len = 0; + if (perflvl->memory) { + ret = calc_mclk(dev, perflvl, info); + if (ret) + goto error; + info->mscript = perflvl->memscript; + } + + divs = read_div(dev); + mast = info->mmast; + + /* start building HWSQ script for engine reclocking */ + hwsq = &info->eclk_hwsq; + hwsq_init(hwsq); + hwsq_setf(hwsq, 0x10, 0); /* disable bus access */ + hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? 
*/ + + /* vdec/dom6: switch to "safe" clocks temporarily */ + if (perflvl->vdec) { + mast &= ~0x00000c00; + divs &= ~0x00000700; + } + + if (perflvl->dom6) { + mast &= ~0x0c000000; + divs &= ~0x00000007; + } + + hwsq_wr32(hwsq, 0x00c040, mast); + + /* vdec: avoid modifying xpll until we know exactly how the other + * clock domains work, i suspect at least some of them can also be + * tied to xpll... + */ + if (perflvl->vdec) { + /* see how close we can get using nvclk as a source */ + clk = calc_div(perflvl->core, perflvl->vdec, &P1); + + /* see how close we can get using xpll/hclk as a source */ + if (nv_device(drm->device)->chipset != 0x98) + out = read_pll(dev, 0x004030); + else + out = read_clk(dev, clk_src_hclkm3d2); + out = calc_div(out, perflvl->vdec, &P2); + + /* select whichever gets us closest */ + if (abs((int)perflvl->vdec - clk) <= + abs((int)perflvl->vdec - out)) { + if (nv_device(drm->device)->chipset != 0x98) + mast |= 0x00000c00; + divs |= P1 << 8; + } else { + mast |= 0x00000800; + divs |= P2 << 8; + } + } + + /* dom6: nfi what this is, but we're limited to various combinations + * of the host clock frequency + */ + if (perflvl->dom6) { + if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) { + mast |= 0x00000000; + } else + if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) { + mast |= 0x08000000; + } else { + clk = read_clk(dev, clk_src_hclk) * 3; + clk = calc_div(clk, perflvl->dom6, &P1); + + mast |= 0x0c000000; + divs |= P1; + } + } + + /* vdec/dom6: complete switch to new clocks */ + switch (nv_device(drm->device)->chipset) { + case 0x92: + case 0x94: + case 0x96: + hwsq_wr32(hwsq, 0x004800, divs); + break; + default: + hwsq_wr32(hwsq, 0x004700, divs); + break; + } + + hwsq_wr32(hwsq, 0x00c040, mast); + + /* core/shader: make sure sclk/nvclk are disconnected from their + * PLLs (nvclk to dom6, sclk to hclk) + */ + if (nv_device(drm->device)->chipset < 0x92) + mast = (mast & ~0x001000b0) | 0x00100080; + else + mast = (mast & ~0x000000b3) | 0x00000081; + + hwsq_wr32(hwsq, 0x00c040, mast); + + /* core: for the moment at least, always use nvpll */ + clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1); + if (clk == 0) + goto error; + + ctrl = nv_rd32(device, 0x004028) & ~0xc03f0100; + mast &= ~0x00100000; + mast |= 3; + + hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl); + hwsq_wr32(hwsq, 0x00402c, (N << 8) | M); + + /* shader: tie to nvclk if possible, otherwise use spll. have to be + * very careful that the shader clock is at least twice the core, or + * some chipsets will be very unhappy. i expect most or all of these + * cases will be handled by tying to nvclk, but it's possible there's + * corners + */ + ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100; + + if (P1-- && perflvl->shader == (perflvl->core << 1)) { + hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl); + hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast); + } else { + clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1); + if (clk == 0) + goto error; + ctrl |= 0x80000000; + + hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl); + hwsq_wr32(hwsq, 0x004024, (N << 8) | M); + hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast); + } + + hwsq_setf(hwsq, 0x10, 1); /* enable bus access */ + hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? 
*/ + hwsq_fini(hwsq); + + return info; +error: + kfree(info); + return ERR_PTR(ret); +} + +static int +prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 hwsq_data, hwsq_kick; + int i; + + if (nv_device(drm->device)->chipset < 0x94) { + hwsq_data = 0x001400; + hwsq_kick = 0x00000003; + } else { + hwsq_data = 0x080000; + hwsq_kick = 0x00000001; + } + /* upload hwsq ucode */ + nv_mask(device, 0x001098, 0x00000008, 0x00000000); + nv_wr32(device, 0x001304, 0x00000000); + if (nv_device(drm->device)->chipset >= 0x92) + nv_wr32(device, 0x001318, 0x00000000); + for (i = 0; i < hwsq->len / 4; i++) + nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]); + nv_mask(device, 0x001098, 0x00000018, 0x00000018); + + /* launch, and wait for completion */ + nv_wr32(device, 0x00130c, hwsq_kick); + if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) { + NV_ERROR(drm, "hwsq ucode exec timed out\n"); + NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308)); + for (i = 0; i < hwsq->len / 4; i++) { + NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4), + nv_rd32(device, 0x001400 + (i * 4))); + } + + return -EIO; + } + + return 0; +} + +int +nv50_pm_clocks_set(struct drm_device *dev, void *data) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nv50_pm_state *info = data; + struct bit_entry M; + int ret = -EBUSY; + + /* halt and idle execution engines */ + nv_mask(device, 0x002504, 0x00000001, 0x00000001); + if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) + goto resume; + if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f)) + goto resume; + + /* program memory clock, if necessary - must come before engine clock + * reprogramming due to how we construct the hwsq scripts in pre() + */ +#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0) + if (info->mclk_hwsq.len) { + /* execute some scripts that do ??? from the vbios.. */ + if (!bit_table(dev, 'M', &M) && M.version == 1) { + if (M.length >= 6) + nouveau_bios_init_exec(dev, ROM16(M.data[5])); + if (M.length >= 8) + nouveau_bios_init_exec(dev, ROM16(M.data[7])); + if (M.length >= 10) + nouveau_bios_init_exec(dev, ROM16(M.data[9])); + nouveau_bios_init_exec(dev, info->mscript); + } + + ret = prog_hwsq(dev, &info->mclk_hwsq); + if (ret) + goto resume; + } + + /* program engine clocks */ + ret = prog_hwsq(dev, &info->eclk_hwsq); + +resume: + nv_mask(device, 0x002504, 0x00000001, 0x00000000); + kfree(info); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c new file mode 100644 index 0000000..9fd475c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -0,0 +1,276 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/client.h> +#include <core/class.h> + +#include <engine/fifo.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" + +#include "nv50_display.h" + +u64 +nv84_fence_crtc(struct nouveau_channel *chan, int crtc) +{ + struct nv84_fence_chan *fctx = chan->fence; + return fctx->dispc_vma[crtc].offset; +} + +static int +nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence) +{ + int ret = RING_SPACE(chan, 8); + if (ret == 0) { + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); + OUT_RING (chan, chan->vram); + BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5); + OUT_RING (chan, upper_32_bits(virtual)); + OUT_RING (chan, lower_32_bits(virtual)); + OUT_RING (chan, sequence); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); + OUT_RING (chan, 0x00000000); + FIRE_RING (chan); + } + return ret; +} + +static int +nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence) +{ + int ret = RING_SPACE(chan, 7); + if (ret == 0) { + BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); + OUT_RING (chan, chan->vram); + BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(virtual)); + OUT_RING (chan, lower_32_bits(virtual)); + OUT_RING (chan, sequence); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL); + FIRE_RING (chan); + } + return ret; +} + +static int +nv84_fence_emit(struct nouveau_fence *fence) +{ + struct nouveau_channel *chan = fence->channel; + struct nv84_fence_chan *fctx = chan->fence; + struct nouveau_fifo_chan *fifo = (void *)chan->object; + u64 addr = fifo->chid * 16; + + if (fence->sysmem) + addr += fctx->vma_gart.offset; + else + addr += fctx->vma.offset; + + return fctx->base.emit32(chan, addr, fence->sequence); +} + +static int +nv84_fence_sync(struct nouveau_fence *fence, + struct nouveau_channel *prev, struct nouveau_channel *chan) +{ + struct nv84_fence_chan *fctx = chan->fence; + struct nouveau_fifo_chan *fifo = (void *)prev->object; + u64 addr = fifo->chid * 16; + + if (fence->sysmem) + addr += fctx->vma_gart.offset; + else + addr += fctx->vma.offset; + + return fctx->base.sync32(chan, addr, fence->sequence); +} + +static u32 +nv84_fence_read(struct nouveau_channel *chan) +{ + struct nouveau_fifo_chan *fifo = (void *)chan->object; + struct nv84_fence_priv *priv = chan->drm->fence; + return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4); +} + +static void +nv84_fence_context_del(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->drm->dev; + struct nv84_fence_priv *priv = chan->drm->fence; + struct nv84_fence_chan *fctx = chan->fence; + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); + nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); + } + + nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); + nouveau_bo_vma_del(priv->bo, &fctx->vma); + nouveau_fence_context_del(&fctx->base); + chan->fence = NULL; + kfree(fctx); 
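+ /* each channel's view of the fence buffers and the per-crtc semaphore
+ * buffers is unmapped here; the buffers themselves stay owned by
+ * nv84_fence_priv until nv84_fence_destroy()
+ */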
+} + +int +nv84_fence_context_new(struct nouveau_channel *chan) +{ + struct nouveau_fifo_chan *fifo = (void *)chan->object; + struct nouveau_client *client = nouveau_client(fifo); + struct nv84_fence_priv *priv = chan->drm->fence; + struct nv84_fence_chan *fctx; + int ret, i; + + fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return -ENOMEM; + + nouveau_fence_context_new(&fctx->base); + fctx->base.emit = nv84_fence_emit; + fctx->base.sync = nv84_fence_sync; + fctx->base.read = nv84_fence_read; + fctx->base.emit32 = nv84_fence_emit32; + fctx->base.sync32 = nv84_fence_sync32; + + ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma); + if (ret == 0) { + ret = nouveau_bo_vma_add(priv->bo_gart, client->vm, + &fctx->vma_gart); + } + + /* map display semaphore buffers into channel's vm */ + for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { + struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); + ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); + } + + nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000); + + if (ret) + nv84_fence_context_del(chan); + return ret; +} + +static bool +nv84_fence_suspend(struct nouveau_drm *drm) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); + struct nv84_fence_priv *priv = drm->fence; + int i; + + priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32)); + if (priv->suspend) { + for (i = 0; i <= pfifo->max; i++) + priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4); + } + + return priv->suspend != NULL; +} + +static void +nv84_fence_resume(struct nouveau_drm *drm) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); + struct nv84_fence_priv *priv = drm->fence; + int i; + + if (priv->suspend) { + for (i = 0; i <= pfifo->max; i++) + nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]); + vfree(priv->suspend); + priv->suspend = NULL; + } +} + +static void +nv84_fence_destroy(struct nouveau_drm *drm) +{ + struct nv84_fence_priv *priv = drm->fence; + nouveau_bo_unmap(priv->bo_gart); + if (priv->bo_gart) + nouveau_bo_unpin(priv->bo_gart); + nouveau_bo_ref(NULL, &priv->bo_gart); + nouveau_bo_unmap(priv->bo); + if (priv->bo) + nouveau_bo_unpin(priv->bo); + nouveau_bo_ref(NULL, &priv->bo); + drm->fence = NULL; + kfree(priv); +} + +int +nv84_fence_create(struct nouveau_drm *drm) +{ + struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); + struct nv84_fence_priv *priv; + int ret; + + priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base.dtor = nv84_fence_destroy; + priv->base.suspend = nv84_fence_suspend; + priv->base.resume = nv84_fence_resume; + priv->base.context_new = nv84_fence_context_new; + priv->base.context_del = nv84_fence_context_del; + + init_waitqueue_head(&priv->base.waiting); + priv->base.uevent = true; + + ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0, + TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo); + if (ret == 0) { + ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); + if (ret == 0) { + ret = nouveau_bo_map(priv->bo); + if (ret) + nouveau_bo_unpin(priv->bo); + } + if (ret) + nouveau_bo_ref(NULL, &priv->bo); + } + + if (ret == 0) + ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0, + TTM_PL_FLAG_TT, 0, 0, NULL, + &priv->bo_gart); + if (ret == 0) { + ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT); + if (ret == 0) { + ret = nouveau_bo_map(priv->bo_gart); + if (ret) + nouveau_bo_unpin(priv->bo_gart); + } + if (ret) + nouveau_bo_ref(NULL, &priv->bo_gart); + } + + if (ret) + nv84_fence_destroy(drm); + 
return ret; +} diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c new file mode 100644 index 0000000..863f010 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nva3_pm.c @@ -0,0 +1,624 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <drm/drmP.h> +#include "nouveau_drm.h" +#include "nouveau_bios.h" +#include "nouveau_pm.h" + +#include <subdev/bios/pll.h> +#include <subdev/bios.h> +#include <subdev/clock.h> +#include <subdev/timer.h> +#include <subdev/fb.h> + +static u32 read_clk(struct drm_device *, int, bool); +static u32 read_pll(struct drm_device *, int, u32); + +static u32 +read_vco(struct drm_device *dev, int clk) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 sctl = nv_rd32(device, 0x4120 + (clk * 4)); + if ((sctl & 0x00000030) != 0x00000030) + return read_pll(dev, 0x41, 0x00e820); + return read_pll(dev, 0x42, 0x00e8a0); +} + +static u32 +read_clk(struct drm_device *dev, int clk, bool ignore_en) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + u32 sctl, sdiv, sclk; + + /* refclk for the 0xe8xx plls is a fixed frequency */ + if (clk >= 0x40) { + if (nv_device(drm->device)->chipset == 0xaf) { + /* no joke.. seriously.. sigh.. */ + return nv_rd32(device, 0x00471c) * 1000; + } + + return device->crystal; + } + + sctl = nv_rd32(device, 0x4120 + (clk * 4)); + if (!ignore_en && !(sctl & 0x00000100)) + return 0; + + switch (sctl & 0x00003000) { + case 0x00000000: + return device->crystal; + case 0x00002000: + if (sctl & 0x00000040) + return 108000; + return 100000; + case 0x00003000: + sclk = read_vco(dev, clk); + sdiv = ((sctl & 0x003f0000) >> 16) + 2; + return (sclk * 2) / sdiv; + default: + return 0; + } +} + +static u32 +read_pll(struct drm_device *dev, int clk, u32 pll) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ctrl = nv_rd32(device, pll + 0); + u32 sclk = 0, P = 1, N = 1, M = 1; + + if (!(ctrl & 0x00000008)) { + if (ctrl & 0x00000001) { + u32 coef = nv_rd32(device, pll + 4); + M = (coef & 0x000000ff) >> 0; + N = (coef & 0x0000ff00) >> 8; + P = (coef & 0x003f0000) >> 16; + + /* no post-divider on these.. 
*/ + if ((pll & 0x00ff00) == 0x00e800) + P = 1; + + sclk = read_clk(dev, 0x00 + clk, false); + } + } else { + sclk = read_clk(dev, 0x10 + clk, false); + } + + if (M * P) + return sclk * N / (M * P); + return 0; +} + +struct creg { + u32 clk; + u32 pll; +}; + +static int +calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct nvbios_pll limits; + u32 oclk, sclk, sdiv; + int P, N, M, diff; + int ret; + + reg->pll = 0; + reg->clk = 0; + if (!khz) { + NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk); + return 0; + } + + switch (khz) { + case 27000: + reg->clk = 0x00000100; + return khz; + case 100000: + reg->clk = 0x00002100; + return khz; + case 108000: + reg->clk = 0x00002140; + return khz; + default: + sclk = read_vco(dev, clk); + sdiv = min((sclk * 2) / (khz - 2999), (u32)65); + /* if the clock has a PLL attached, and we can get a within + * [-2, 3) MHz of a divider, we'll disable the PLL and use + * the divider instead. + * + * divider can go as low as 2, limited here because NVIDIA + * and the VBIOS on my NVA8 seem to prefer using the PLL + * for 810MHz - is there a good reason? + */ + if (sdiv > 4) { + oclk = (sclk * 2) / sdiv; + diff = khz - oclk; + if (!pll || (diff >= -2000 && diff < 3000)) { + reg->clk = (((sdiv - 2) << 16) | 0x00003100); + return oclk; + } + } + + if (!pll) { + NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk); + return -ERANGE; + } + + break; + } + + ret = nvbios_pll_parse(bios, pll, &limits); + if (ret) + return ret; + + limits.refclk = read_clk(dev, clk - 0x10, true); + if (!limits.refclk) + return -EINVAL; + + ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); + if (ret >= 0) { + reg->clk = nv_rd32(device, 0x4120 + (clk * 4)); + reg->pll = (P << 16) | (N << 8) | M; + } + + return ret; +} + +static void +prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + const u32 src0 = 0x004120 + (clk * 4); + const u32 src1 = 0x004160 + (clk * 4); + const u32 ctrl = pll + 0; + const u32 coef = pll + 4; + + if (!reg->clk && !reg->pll) { + NV_DEBUG(drm, "no clock for %02x\n", clk); + return; + } + + if (reg->pll) { + nv_mask(device, src0, 0x00000101, 0x00000101); + nv_wr32(device, coef, reg->pll); + nv_mask(device, ctrl, 0x00000015, 0x00000015); + nv_mask(device, ctrl, 0x00000010, 0x00000000); + nv_wait(device, ctrl, 0x00020000, 0x00020000); + nv_mask(device, ctrl, 0x00000010, 0x00000010); + nv_mask(device, ctrl, 0x00000008, 0x00000000); + nv_mask(device, src1, 0x00000100, 0x00000000); + nv_mask(device, src1, 0x00000001, 0x00000000); + } else { + nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk); + nv_mask(device, ctrl, 0x00000018, 0x00000018); + udelay(20); + nv_mask(device, ctrl, 0x00000001, 0x00000000); + nv_mask(device, src0, 0x00000100, 0x00000000); + nv_mask(device, src0, 0x00000001, 0x00000000); + } +} + +static void +prog_clk(struct drm_device *dev, int clk, struct creg *reg) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + + if (!reg->clk) { + NV_DEBUG(drm, "no clock for %02x\n", clk); + return; + } + + nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk); +} + +int +nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + perflvl->core = 
read_pll(dev, 0x00, 0x4200); + perflvl->shader = read_pll(dev, 0x01, 0x4220); + perflvl->memory = read_pll(dev, 0x02, 0x4000); + perflvl->unka0 = read_clk(dev, 0x20, false); + perflvl->vdec = read_clk(dev, 0x21, false); + perflvl->daemon = read_clk(dev, 0x25, false); + perflvl->copy = perflvl->core; + return 0; +} + +struct nva3_pm_state { + struct nouveau_pm_level *perflvl; + + struct creg nclk; + struct creg sclk; + struct creg vdec; + struct creg unka0; + + struct creg mclk; + u8 *rammap; + u8 rammap_ver; + u8 rammap_len; + u8 *ramcfg; + u8 ramcfg_len; + u32 r004018; + u32 r100760; +}; + +void * +nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nva3_pm_state *info; + u8 ramcfg_cnt; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk); + if (ret < 0) + goto out; + + ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk); + if (ret < 0) + goto out; + + ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk); + if (ret < 0) + goto out; + + ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0); + if (ret < 0) + goto out; + + ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec); + if (ret < 0) + goto out; + + info->rammap = nouveau_perf_rammap(dev, perflvl->memory, + &info->rammap_ver, + &info->rammap_len, + &ramcfg_cnt, &info->ramcfg_len); + if (info->rammap_ver != 0x10 || info->rammap_len < 5) + info->rammap = NULL; + + info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory, + &info->rammap_ver, + &info->ramcfg_len); + if (info->rammap_ver != 0x10) + info->ramcfg = NULL; + + info->perflvl = perflvl; +out: + if (ret < 0) { + kfree(info); + info = ERR_PTR(ret); + } + return info; +} + +static bool +nva3_pm_grcp_idle(void *data) +{ + struct drm_device *dev = data; + struct nouveau_device *device = nouveau_dev(dev); + + if (!(nv_rd32(device, 0x400304) & 0x00000001)) + return true; + if (nv_rd32(device, 0x400308) == 0x0050001c) + return true; + return false; +} + +static void +mclk_precharge(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + nv_wr32(device, 0x1002d4, 0x00000001); +} + +static void +mclk_refresh(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + nv_wr32(device, 0x1002d0, 0x00000001); +} + +static void +mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000); +} + +static void +mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + nv_wr32(device, 0x1002dc, enable ? 
0x00000001 : 0x00000000); +} + +static void +mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + volatile u32 post = nv_rd32(device, 0); (void)post; + udelay((nsec + 500) / 1000); +} + +static u32 +mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + if (mr <= 1) + return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4)); + if (mr <= 3) + return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4)); + return 0; +} + +static void +mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nouveau_fb *pfb = nouveau_fb(device); + if (mr <= 1) { + if (pfb->ram.ranks > 1) + nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data); + nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data); + } else + if (mr <= 3) { + if (pfb->ram.ranks > 1) + nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data); + nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data); + } +} + +static void +mclk_clock_set(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nva3_pm_state *info = exec->priv; + u32 ctrl; + + ctrl = nv_rd32(device, 0x004000); + if (!(ctrl & 0x00000008) && info->mclk.pll) { + nv_wr32(device, 0x004000, (ctrl |= 0x00000008)); + nv_mask(device, 0x1110e0, 0x00088000, 0x00088000); + nv_wr32(device, 0x004018, 0x00001000); + nv_wr32(device, 0x004000, (ctrl &= ~0x00000001)); + nv_wr32(device, 0x004004, info->mclk.pll); + nv_wr32(device, 0x004000, (ctrl |= 0x00000001)); + udelay(64); + nv_wr32(device, 0x004018, 0x00005000 | info->r004018); + udelay(20); + } else + if (!info->mclk.pll) { + nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk); + nv_wr32(device, 0x004000, (ctrl |= 0x00000008)); + nv_mask(device, 0x1110e0, 0x00088000, 0x00088000); + nv_wr32(device, 0x004018, 0x0000d000 | info->r004018); + } + + if (info->rammap) { + if (info->ramcfg && (info->rammap[4] & 0x08)) { + u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) | + info->ramcfg[5]; + u32 unk5a4 = ROM16(info->ramcfg[7]); + u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 | + (info->ramcfg[3] & 0x0f) << 16 | + (info->ramcfg[9] & 0x0f) | + 0x80000000; + nv_wr32(device, 0x1005a0, unk5a0); + nv_wr32(device, 0x1005a4, unk5a4); + nv_wr32(device, 0x10f804, unk804); + nv_mask(device, 0x10053c, 0x00001000, 0x00000000); + } else { + nv_mask(device, 0x10053c, 0x00001000, 0x00001000); + nv_mask(device, 0x10f804, 0x80000000, 0x00000000); + nv_mask(device, 0x100760, 0x22222222, info->r100760); + nv_mask(device, 0x1007a0, 0x22222222, info->r100760); + nv_mask(device, 0x1007e0, 0x22222222, info->r100760); + } + } + + if (info->mclk.pll) { + nv_mask(device, 0x1110e0, 0x00088000, 0x00011000); + nv_wr32(device, 0x004000, (ctrl &= ~0x00000008)); + } +} + +static void +mclk_timing_set(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nva3_pm_state *info = exec->priv; + struct nouveau_pm_level *perflvl = info->perflvl; + int i; + + for (i = 0; i < 9; i++) + nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]); + + if (info->ramcfg) { + u32 data = (info->ramcfg[2] & 0x08) ? 
0x00000000 : 0x00001000; + nv_mask(device, 0x100200, 0x00001000, data); + } + + if (info->ramcfg) { + u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010; + u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100; + u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100; + if ( (info->ramcfg[2] & 0x20)) + unk714 |= 0xf0000000; + if (!(info->ramcfg[2] & 0x04)) + unk714 |= 0x00000010; + nv_wr32(device, 0x100714, unk714); + + if (info->ramcfg[2] & 0x01) + unk71c |= 0x00000100; + nv_wr32(device, 0x10071c, unk71c); + + if (info->ramcfg[2] & 0x02) + unk718 |= 0x00000100; + nv_wr32(device, 0x100718, unk718); + + if (info->ramcfg[2] & 0x10) + nv_wr32(device, 0x111100, 0x48000000); /*XXX*/ + } +} + +static void +prog_mem(struct drm_device *dev, struct nva3_pm_state *info) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_mem_exec_func exec = { + .dev = dev, + .precharge = mclk_precharge, + .refresh = mclk_refresh, + .refresh_auto = mclk_refresh_auto, + .refresh_self = mclk_refresh_self, + .wait = mclk_wait, + .mrg = mclk_mrg, + .mrs = mclk_mrs, + .clock_set = mclk_clock_set, + .timing_set = mclk_timing_set, + .priv = info + }; + u32 ctrl; + + /* XXX: where the fuck does 750MHz come from? */ + if (info->perflvl->memory <= 750000) { + info->r004018 = 0x10000000; + info->r100760 = 0x22222222; + } + + ctrl = nv_rd32(device, 0x004000); + if (ctrl & 0x00000008) { + if (info->mclk.pll) { + nv_mask(device, 0x004128, 0x00000101, 0x00000101); + nv_wr32(device, 0x004004, info->mclk.pll); + nv_wr32(device, 0x004000, (ctrl |= 0x00000001)); + nv_wr32(device, 0x004000, (ctrl &= 0xffffffef)); + nv_wait(device, 0x004000, 0x00020000, 0x00020000); + nv_wr32(device, 0x004000, (ctrl |= 0x00000010)); + nv_wr32(device, 0x004018, 0x00005000 | info->r004018); + nv_wr32(device, 0x004000, (ctrl |= 0x00000004)); + } + } else { + u32 ssel = 0x00000101; + if (info->mclk.clk) + ssel |= info->mclk.clk; + else + ssel |= 0x00080000; /* 324MHz, shouldn't matter... 
*/ + nv_mask(device, 0x004168, 0x003f3141, ctrl); + } + + if (info->ramcfg) { + if (info->ramcfg[2] & 0x10) { + nv_mask(device, 0x111104, 0x00000600, 0x00000000); + } else { + nv_mask(device, 0x111100, 0x40000000, 0x40000000); + nv_mask(device, 0x111104, 0x00000180, 0x00000000); + } + } + if (info->rammap && !(info->rammap[4] & 0x02)) + nv_mask(device, 0x100200, 0x00000800, 0x00000000); + nv_wr32(device, 0x611200, 0x00003300); + if (!(info->ramcfg[2] & 0x10)) + nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/ + + nouveau_mem_exec(&exec, info->perflvl); + + nv_wr32(device, 0x611200, 0x00003330); + if (info->rammap && (info->rammap[4] & 0x02)) + nv_mask(device, 0x100200, 0x00000800, 0x00000800); + if (info->ramcfg) { + if (info->ramcfg[2] & 0x10) { + nv_mask(device, 0x111104, 0x00000180, 0x00000180); + nv_mask(device, 0x111100, 0x40000000, 0x00000000); + } else { + nv_mask(device, 0x111104, 0x00000600, 0x00000600); + } + } + + if (info->mclk.pll) { + nv_mask(device, 0x004168, 0x00000001, 0x00000000); + nv_mask(device, 0x004168, 0x00000100, 0x00000000); + } else { + nv_mask(device, 0x004000, 0x00000001, 0x00000000); + nv_mask(device, 0x004128, 0x00000001, 0x00000000); + nv_mask(device, 0x004128, 0x00000100, 0x00000000); + } +} + +int +nva3_pm_clocks_set(struct drm_device *dev, void *pre_state) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nva3_pm_state *info = pre_state; + int ret = -EAGAIN; + + /* prevent any new grctx switches from starting */ + nv_wr32(device, 0x400324, 0x00000000); + nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */ + /* wait for any pending grctx switches to complete */ + if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) { + NV_ERROR(drm, "pm: ctxprog didn't go idle\n"); + goto cleanup; + } + /* freeze PFIFO */ + nv_mask(device, 0x002504, 0x00000001, 0x00000001); + if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) { + NV_ERROR(drm, "pm: fifo didn't go idle\n"); + goto cleanup; + } + + prog_pll(dev, 0x00, 0x004200, &info->nclk); + prog_pll(dev, 0x01, 0x004220, &info->sclk); + prog_clk(dev, 0x20, &info->unka0); + prog_clk(dev, 0x21, &info->vdec); + + if (info->mclk.clk || info->mclk.pll) + prog_mem(dev, info); + + ret = 0; + +cleanup: + /* unfreeze PFIFO */ + nv_mask(device, 0x002504, 0x00000001, 0x00000000); + /* restore ctxprog to normal */ + nv_wr32(device, 0x400324, 0x00000000); + nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */ + /* unblock it if necessary */ + if (nv_rd32(device, 0x400308) == 0x0050001c) + nv_mask(device, 0x400824, 0x10000000, 0x10000000); + kfree(info); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c new file mode 100644 index 0000000..9dcd30f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -0,0 +1,262 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fbcon.h" + +int +nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11); + if (ret) + return ret; + + if (rect->rop != ROP_COPY) { + BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1); + OUT_RING (chan, 1); + } + BEGIN_NVC0(chan, NvSub2D, 0x0588, 1); + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) + OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]); + else + OUT_RING (chan, rect->color); + BEGIN_NVC0(chan, NvSub2D, 0x0600, 4); + OUT_RING (chan, rect->dx); + OUT_RING (chan, rect->dy); + OUT_RING (chan, rect->dx + rect->width); + OUT_RING (chan, rect->dy + rect->height); + if (rect->rop != ROP_COPY) { + BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1); + OUT_RING (chan, 3); + } + FIRE_RING(chan); + return 0; +} + +int +nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + ret = RING_SPACE(chan, 12); + if (ret) + return ret; + + BEGIN_NVC0(chan, NvSub2D, 0x0110, 1); + OUT_RING (chan, 0); + BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4); + OUT_RING (chan, region->dx); + OUT_RING (chan, region->dy); + OUT_RING (chan, region->width); + OUT_RING (chan, region->height); + BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4); + OUT_RING (chan, 0); + OUT_RING (chan, region->sx); + OUT_RING (chan, 0); + OUT_RING (chan, region->sy); + FIRE_RING(chan); + return 0; +} + +int +nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); + struct nouveau_channel *chan = drm->channel; + uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); + uint32_t *palette = info->pseudo_palette; + int ret; + + if (image->depth != 1) + return -ENODEV; + + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + + width = ALIGN(image->width, 32); + dwords = (width * image->height) >> 5; + + BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); + if (info->fix.visual == FB_VISUAL_TRUECOLOR || + info->fix.visual == FB_VISUAL_DIRECTCOLOR) { + OUT_RING (chan, palette[image->bg_color] | mask); + OUT_RING (chan, palette[image->fg_color] | mask); + } else { + OUT_RING (chan, image->bg_color); + OUT_RING (chan, image->fg_color); + } + BEGIN_NVC0(chan, NvSub2D, 0x0838, 2); + OUT_RING (chan, image->width); + OUT_RING (chan, image->height); + BEGIN_NVC0(chan, NvSub2D, 0x0850, 4); + OUT_RING (chan, 0); + OUT_RING (chan, image->dx); + OUT_RING (chan, 0); + OUT_RING (chan, image->dy); + + while (dwords) { + int push = dwords > 2047 ? 
2047 : dwords; + + ret = RING_SPACE(chan, push + 1); + if (ret) + return ret; + + dwords -= push; + + BEGIN_NIC0(chan, NvSub2D, 0x0860, push); + OUT_RINGp(chan, data, push); + data += push; + } + + FIRE_RING(chan); + return 0; +} + +int +nvc0_fbcon_accel_init(struct fb_info *info) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_channel *chan = drm->channel; + struct nouveau_object *object; + int ret, format; + + ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, + 0x902d, NULL, 0, &object); + if (ret) + return ret; + + switch (info->var.bits_per_pixel) { + case 8: + format = 0xf3; + break; + case 15: + format = 0xf8; + break; + case 16: + format = 0xe8; + break; + case 32: + switch (info->var.transp.length) { + case 0: /* depth 24 */ + case 8: /* depth 32, just use 24.. */ + format = 0xe6; + break; + case 2: /* depth 30 */ + format = 0xd1; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + ret = RING_SPACE(chan, 60); + if (ret) { + WARN_ON(1); + nouveau_fbcon_gpu_lockup(info); + return ret; + } + + BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); + OUT_RING (chan, 0x0000902d); + BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); + OUT_RING (chan, 0); + BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1); + OUT_RING (chan, 3); + BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1); + OUT_RING (chan, 0x55); + BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, NvSub2D, 0x0580, 2); + OUT_RING (chan, 4); + OUT_RING (chan, format); + BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2); + OUT_RING (chan, 2); + OUT_RING (chan, 1); + + BEGIN_NVC0(chan, NvSub2D, 0x0804, 1); + OUT_RING (chan, format); + BEGIN_NVC0(chan, NvSub2D, 0x0800, 1); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, NvSub2D, 0x0808, 3); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, NvSub2D, 0x081c, 1); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, NvSub2D, 0x0840, 4); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, NvSub2D, 0x0200, 10); + OUT_RING (chan, format); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, info->fix.line_length); + OUT_RING (chan, info->var.xres_virtual); + OUT_RING (chan, info->var.yres_virtual); + OUT_RING (chan, upper_32_bits(fb->vma.offset)); + OUT_RING (chan, lower_32_bits(fb->vma.offset)); + BEGIN_NVC0(chan, NvSub2D, 0x0230, 10); + OUT_RING (chan, format); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, info->fix.line_length); + OUT_RING (chan, info->var.xres_virtual); + OUT_RING (chan, info->var.yres_virtual); + OUT_RING (chan, upper_32_bits(fb->vma.offset)); + OUT_RING (chan, lower_32_bits(fb->vma.offset)); + FIRE_RING (chan); + + return 0; +} + diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c new file mode 100644 index 0000000..9566267 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_fence.c @@ -0,0 +1,90 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include <core/object.h> +#include <core/client.h> +#include <core/class.h> + +#include <engine/fifo.h> + +#include "nouveau_drm.h" +#include "nouveau_dma.h" +#include "nouveau_fence.h" + +#include "nv50_display.h" + +static int +nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence) +{ + int ret = RING_SPACE(chan, 6); + if (ret == 0) { + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5); + OUT_RING (chan, upper_32_bits(virtual)); + OUT_RING (chan, lower_32_bits(virtual)); + OUT_RING (chan, sequence); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); + OUT_RING (chan, 0x00000000); + FIRE_RING (chan); + } + return ret; +} + +static int +nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence) +{ + int ret = RING_SPACE(chan, 5); + if (ret == 0) { + BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); + OUT_RING (chan, upper_32_bits(virtual)); + OUT_RING (chan, lower_32_bits(virtual)); + OUT_RING (chan, sequence); + OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL | + NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD); + FIRE_RING (chan); + } + return ret; +} + +static int +nvc0_fence_context_new(struct nouveau_channel *chan) +{ + int ret = nv84_fence_context_new(chan); + if (ret == 0) { + struct nv84_fence_chan *fctx = chan->fence; + fctx->base.emit32 = nvc0_fence_emit32; + fctx->base.sync32 = nvc0_fence_sync32; + } + return ret; +} + +int +nvc0_fence_create(struct nouveau_drm *drm) +{ + int ret = nv84_fence_create(drm); + if (ret == 0) { + struct nv84_fence_priv *priv = drm->fence; + priv->base.context_new = nvc0_fence_context_new; + } + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c new file mode 100644 index 0000000..0d34eb5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_pm.c @@ -0,0 +1,599 @@ +/* + * Copyright 2011 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nouveau_drm.h" +#include "nouveau_bios.h" +#include "nouveau_pm.h" + +#include <subdev/bios/pll.h> +#include <subdev/bios.h> +#include <subdev/clock.h> +#include <subdev/timer.h> +#include <subdev/fb.h> + +static u32 read_div(struct drm_device *, int, u32, u32); +static u32 read_pll(struct drm_device *, u32); + +static u32 +read_vco(struct drm_device *dev, u32 dsrc) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ssrc = nv_rd32(device, dsrc); + if (!(ssrc & 0x00000100)) + return read_pll(dev, 0x00e800); + return read_pll(dev, 0x00e820); +} + +static u32 +read_pll(struct drm_device *dev, u32 pll) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ctrl = nv_rd32(device, pll + 0); + u32 coef = nv_rd32(device, pll + 4); + u32 P = (coef & 0x003f0000) >> 16; + u32 N = (coef & 0x0000ff00) >> 8; + u32 M = (coef & 0x000000ff) >> 0; + u32 sclk, doff; + + if (!(ctrl & 0x00000001)) + return 0; + + switch (pll & 0xfff000) { + case 0x00e000: + sclk = 27000; + P = 1; + break; + case 0x137000: + doff = (pll - 0x137000) / 0x20; + sclk = read_div(dev, doff, 0x137120, 0x137140); + break; + case 0x132000: + switch (pll) { + case 0x132000: + sclk = read_pll(dev, 0x132020); + break; + case 0x132020: + sclk = read_div(dev, 0, 0x137320, 0x137330); + break; + default: + return 0; + } + break; + default: + return 0; + } + + return sclk * N / M / P; +} + +static u32 +read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ssrc = nv_rd32(device, dsrc + (doff * 4)); + u32 sctl = nv_rd32(device, dctl + (doff * 4)); + + switch (ssrc & 0x00000003) { + case 0: + if ((ssrc & 0x00030000) != 0x00030000) + return 27000; + return 108000; + case 2: + return 100000; + case 3: + if (sctl & 0x80000000) { + u32 sclk = read_vco(dev, dsrc + (doff * 4)); + u32 sdiv = (sctl & 0x0000003f) + 2; + return (sclk * 2) / sdiv; + } + + return read_vco(dev, dsrc + (doff * 4)); + default: + return 0; + } +} + +static u32 +read_mem(struct drm_device *dev) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 ssel = nv_rd32(device, 0x1373f0); + if (ssel & 0x00000001) + return read_div(dev, 0, 0x137300, 0x137310); + return read_pll(dev, 0x132000); +} + +static u32 +read_clk(struct drm_device *dev, int clk) +{ + struct nouveau_device *device = nouveau_dev(dev); + u32 sctl = nv_rd32(device, 0x137250 + (clk * 4)); + u32 ssel = 
nv_rd32(device, 0x137100); + u32 sclk, sdiv; + + if (ssel & (1 << clk)) { + if (clk < 7) + sclk = read_pll(dev, 0x137000 + (clk * 0x20)); + else + sclk = read_pll(dev, 0x1370e0); + sdiv = ((sctl & 0x00003f00) >> 8) + 2; + } else { + sclk = read_div(dev, clk, 0x137160, 0x1371d0); + sdiv = ((sctl & 0x0000003f) >> 0) + 2; + } + + if (sctl & 0x80000000) + return (sclk * 2) / sdiv; + return sclk; +} + +int +nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + perflvl->shader = read_clk(dev, 0x00); + perflvl->core = perflvl->shader / 2; + perflvl->memory = read_mem(dev); + perflvl->rop = read_clk(dev, 0x01); + perflvl->hub07 = read_clk(dev, 0x02); + perflvl->hub06 = read_clk(dev, 0x07); + perflvl->hub01 = read_clk(dev, 0x08); + perflvl->copy = read_clk(dev, 0x09); + perflvl->daemon = read_clk(dev, 0x0c); + perflvl->vdec = read_clk(dev, 0x0e); + return 0; +} + +struct nvc0_pm_clock { + u32 freq; + u32 ssel; + u32 mdiv; + u32 dsrc; + u32 ddiv; + u32 coef; +}; + +struct nvc0_pm_state { + struct nouveau_pm_level *perflvl; + struct nvc0_pm_clock eng[16]; + struct nvc0_pm_clock mem; +}; + +static u32 +calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv) +{ + u32 div = min((ref * 2) / freq, (u32)65); + if (div < 2) + div = 2; + + *ddiv = div - 2; + return (ref * 2) / div; +} + +static u32 +calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv) +{ + u32 sclk; + + /* use one of the fixed frequencies if possible */ + *ddiv = 0x00000000; + switch (freq) { + case 27000: + case 108000: + *dsrc = 0x00000000; + if (freq == 108000) + *dsrc |= 0x00030000; + return freq; + case 100000: + *dsrc = 0x00000002; + return freq; + default: + *dsrc = 0x00000003; + break; + } + + /* otherwise, calculate the closest divider */ + sclk = read_vco(dev, clk); + if (clk < 7) + sclk = calc_div(dev, clk, sclk, freq, ddiv); + return sclk; +} + +static u32 +calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct nvbios_pll limits; + int N, M, P, ret; + + ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits); + if (ret) + return 0; + + limits.refclk = read_div(dev, clk, 0x137120, 0x137140); + if (!limits.refclk) + return 0; + + ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P); + if (ret <= 0) + return 0; + + *coef = (P << 16) | (N << 8) | M; + return ret; +} + +/* A (likely rather simplified and incomplete) view of the clock tree + * + * Key: + * + * S: source select + * D: divider + * P: pll + * F: switch + * + * Engine clocks: + * + * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref + * (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref + * + * Not all registers exist for all clocks. For example: clocks >= 8 don't + * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do + * they have the divider at 1371d0, though the source selection at 137160 + * still exists. You must use the divider at 137250 for these instead. + * + * Memory clock: + * + * TBD, read_mem() above is likely very wrong... 
+ * + */ + +static int +calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq) +{ + u32 src0, div0, div1D, div1P = 0; + u32 clk0, clk1 = 0; + + /* invalid clock domain */ + if (!freq) + return 0; + + /* first possible path, using only dividers */ + clk0 = calc_src(dev, clk, freq, &src0, &div0); + clk0 = calc_div(dev, clk, clk0, freq, &div1D); + + /* see if we can get any closer using PLLs */ + if (clk0 != freq && (0x00004387 & (1 << clk))) { + if (clk < 7) + clk1 = calc_pll(dev, clk, freq, &info->coef); + else + clk1 = read_pll(dev, 0x1370e0); + clk1 = calc_div(dev, clk, clk1, freq, &div1P); + } + + /* select the method which gets closest to target freq */ + if (abs((int)freq - clk0) <= abs((int)freq - clk1)) { + info->dsrc = src0; + if (div0) { + info->ddiv |= 0x80000000; + info->ddiv |= div0 << 8; + info->ddiv |= div0; + } + if (div1D) { + info->mdiv |= 0x80000000; + info->mdiv |= div1D; + } + info->ssel = 0; + info->freq = clk0; + } else { + if (div1P) { + info->mdiv |= 0x80000000; + info->mdiv |= div1P << 8; + } + info->ssel = (1 << clk); + info->freq = clk1; + } + + return 0; +} + +static int +calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_bios *bios = nouveau_bios(device); + struct nvbios_pll pll; + int N, M, P, ret; + u32 ctrl; + + /* mclk pll input freq comes from another pll, make sure it's on */ + ctrl = nv_rd32(device, 0x132020); + if (!(ctrl & 0x00000001)) { + /* if not, program it to 567MHz. nfi where this value comes + * from - it looks like it's in the pll limits table for + * 132000 but the binary driver ignores all my attempts to + * change this value. + */ + nv_wr32(device, 0x137320, 0x00000103); + nv_wr32(device, 0x137330, 0x81200606); + nv_wait(device, 0x132020, 0x00010000, 0x00010000); + nv_wr32(device, 0x132024, 0x0001150f); + nv_mask(device, 0x132020, 0x00000001, 0x00000001); + nv_wait(device, 0x137390, 0x00020000, 0x00020000); + nv_mask(device, 0x132020, 0x00000004, 0x00000004); + } + + /* for the moment, until the clock tree is better understood, use + * pll mode for all clock frequencies + */ + ret = nvbios_pll_parse(bios, 0x132000, &pll); + if (ret == 0) { + pll.refclk = read_pll(dev, 0x132020); + if (pll.refclk) { + ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P); + if (ret > 0) { + info->coef = (P << 16) | (N << 8) | M; + return 0; + } + } + } + + return -EINVAL; +} + +void * +nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nvc0_pm_state *info; + int ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + /* NFI why this is still in the performance table, the ROPCs appear + * to get their clock from clock 2 ("hub07", actually hub05 on this + * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP + * are always the same freq with the binary driver even when the + * performance table says they should differ. 
+ */ + if (device->chipset == 0xd9) + perflvl->rop = 0; + + if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) || + (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) || + (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) || + (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) || + (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) || + (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) || + (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) || + (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) { + kfree(info); + return ERR_PTR(ret); + } + + if (perflvl->memory) { + ret = calc_mem(dev, &info->mem, perflvl->memory); + if (ret) { + kfree(info); + return ERR_PTR(ret); + } + } + + info->perflvl = perflvl; + return info; +} + +static void +prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info) +{ + struct nouveau_device *device = nouveau_dev(dev); + + /* program dividers at 137160/1371d0 first */ + if (clk < 7 && !info->ssel) { + nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv); + nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc); + } + + /* switch clock to non-pll mode */ + nv_mask(device, 0x137100, (1 << clk), 0x00000000); + nv_wait(device, 0x137100, (1 << clk), 0x00000000); + + /* reprogram pll */ + if (clk < 7) { + /* make sure it's disabled first... */ + u32 base = 0x137000 + (clk * 0x20); + u32 ctrl = nv_rd32(device, base + 0x00); + if (ctrl & 0x00000001) { + nv_mask(device, base + 0x00, 0x00000004, 0x00000000); + nv_mask(device, base + 0x00, 0x00000001, 0x00000000); + } + /* program it to new values, if necessary */ + if (info->ssel) { + nv_wr32(device, base + 0x04, info->coef); + nv_mask(device, base + 0x00, 0x00000001, 0x00000001); + nv_wait(device, base + 0x00, 0x00020000, 0x00020000); + nv_mask(device, base + 0x00, 0x00020004, 0x00000004); + } + } + + /* select pll/non-pll mode, and program final clock divider */ + nv_mask(device, 0x137100, (1 << clk), info->ssel); + nv_wait(device, 0x137100, (1 << clk), info->ssel); + nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv); +} + +static void +mclk_precharge(struct nouveau_mem_exec_func *exec) +{ +} + +static void +mclk_refresh(struct nouveau_mem_exec_func *exec) +{ +} + +static void +mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + nv_wr32(device, 0x10f210, enable ? 
0x80000000 : 0x00000000); +} + +static void +mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable) +{ +} + +static void +mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec) +{ + udelay((nsec + 500) / 1000); +} + +static u32 +mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nouveau_fb *pfb = nouveau_fb(device); + if (pfb->ram.type != NV_MEM_TYPE_GDDR5) { + if (mr <= 1) + return nv_rd32(device, 0x10f300 + ((mr - 0) * 4)); + return nv_rd32(device, 0x10f320 + ((mr - 2) * 4)); + } else { + if (mr == 0) + return nv_rd32(device, 0x10f300 + (mr * 4)); + else + if (mr <= 7) + return nv_rd32(device, 0x10f32c + (mr * 4)); + return nv_rd32(device, 0x10f34c); + } +} + +static void +mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nouveau_fb *pfb = nouveau_fb(device); + if (pfb->ram.type != NV_MEM_TYPE_GDDR5) { + if (mr <= 1) { + nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data); + if (pfb->ram.ranks > 1) + nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data); + } else + if (mr <= 3) { + nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data); + if (pfb->ram.ranks > 1) + nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data); + } + } else { + if (mr == 0) nv_wr32(device, 0x10f300 + (mr * 4), data); + else if (mr <= 7) nv_wr32(device, 0x10f32c + (mr * 4), data); + else if (mr == 15) nv_wr32(device, 0x10f34c, data); + } +} + +static void +mclk_clock_set(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nvc0_pm_state *info = exec->priv; + u32 ctrl = nv_rd32(device, 0x132000); + + nv_wr32(device, 0x137360, 0x00000001); + nv_wr32(device, 0x137370, 0x00000000); + nv_wr32(device, 0x137380, 0x00000000); + if (ctrl & 0x00000001) + nv_wr32(device, 0x132000, (ctrl &= ~0x00000001)); + + nv_wr32(device, 0x132004, info->mem.coef); + nv_wr32(device, 0x132000, (ctrl |= 0x00000001)); + nv_wait(device, 0x137390, 0x00000002, 0x00000002); + nv_wr32(device, 0x132018, 0x00005000); + + nv_wr32(device, 0x137370, 0x00000001); + nv_wr32(device, 0x137380, 0x00000001); + nv_wr32(device, 0x137360, 0x00000000); +} + +static void +mclk_timing_set(struct nouveau_mem_exec_func *exec) +{ + struct nouveau_device *device = nouveau_dev(exec->dev); + struct nvc0_pm_state *info = exec->priv; + struct nouveau_pm_level *perflvl = info->perflvl; + int i; + + for (i = 0; i < 5; i++) + nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]); +} + +static void +prog_mem(struct drm_device *dev, struct nvc0_pm_state *info) +{ + struct nouveau_device *device = nouveau_dev(dev); + struct nouveau_mem_exec_func exec = { + .dev = dev, + .precharge = mclk_precharge, + .refresh = mclk_refresh, + .refresh_auto = mclk_refresh_auto, + .refresh_self = mclk_refresh_self, + .wait = mclk_wait, + .mrg = mclk_mrg, + .mrs = mclk_mrs, + .clock_set = mclk_clock_set, + .timing_set = mclk_timing_set, + .priv = info + }; + + if (device->chipset < 0xd0) + nv_wr32(device, 0x611200, 0x00003300); + else + nv_wr32(device, 0x62c000, 0x03030000); + + nouveau_mem_exec(&exec, info->perflvl); + + if (device->chipset < 0xd0) + nv_wr32(device, 0x611200, 0x00003330); + else + nv_wr32(device, 0x62c000, 0x03030300); +} +int +nvc0_pm_clocks_set(struct drm_device *dev, void *data) +{ + struct nvc0_pm_state *info = data; + int i; + + if (info->mem.coef) + prog_mem(dev, info); + + for (i = 0; i < 16; i++) { + if (!info->eng[i].freq) + continue; + prog_clk(dev, i, 
&info->eng[i]);
+ }
+
+ kfree(info);
+ return 0;
+}
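A note for readers of the clock code above: the read_pll() helpers in nva3_pm.c and nvc0_pm.c decode the same coefficient word layout (M in bits 7:0, N in bits 15:8, P in bits 21:16) and compute the PLL output as refclk * N / (M * P). The following standalone sketch of that arithmetic is illustrative only; the helper name and the sample coefficient are hypothetical and not part of this patch.

#include <stdio.h>
#include <stdint.h>

/* Decode a PLL coefficient word the way read_pll() does and return the
 * resulting frequency in kHz. Guard against M or P being zero, mirroring
 * the driver's "if (M * P)" check. */
static uint32_t pll_freq_khz(uint32_t refclk_khz, uint32_t coef)
{
	uint32_t m = (coef & 0x000000ff) >> 0;
	uint32_t n = (coef & 0x0000ff00) >> 8;
	uint32_t p = (coef & 0x003f0000) >> 16;

	if (m == 0 || p == 0)
		return 0;
	return refclk_khz * n / (m * p);
}

int main(void)
{
	/* hypothetical example: 27 MHz crystal, M=2, N=60, P=1 gives 810 MHz */
	uint32_t coef = (1 << 16) | (60 << 8) | 2;
	printf("%u kHz\n", (unsigned int)pll_freq_khz(27000, coef));
	return 0;
}

This is the same relationship that calc_clk()/calc_pll() invert via nva3_calc_pll() when choosing N, M and P for a requested frequency, packing the result back as (P << 16) | (N << 8) | M.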