* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (57 commits)
drm/i915: Handle ERESTARTSYS during page fault
drm/i915: Warn before mmaping a purgeable buffer.
drm/i915: Track purged state.
drm/i915: Remove eviction debug spam
drm/i915: Immediately discard any backing storage for unneeded objects
drm/i915: Do not mis-classify clean objects as purgeable
drm/i915: Whitespace correction for madv
drm/i915: BUG_ON page refleak during unbind
drm/i915: Search harder for a reusable object
drm/i915: Clean up evict from list.
drm/i915: Add tracepoints
drm/i915: framebuffer compression for GM45+
drm/i915: split display functions by chip type
drm/i915: Skip the sanity checks if the current relocation is valid
drm/i915: Check that the relocation points to within the target
drm/i915: correct FBC update when pipe base update occurs
drm/i915: blacklist Acer AspireOne lid status
ACPI: make ACPI button funcs no-ops if not built in
drm/i915: prevent FIFO calculation overflows on 32 bits with high dotclocks
drm/i915: intel_display.c handle latency variable efficiently
...
Fix up trivial conflicts in drivers/gpu/drm/i915/{i915_dma.c,i915_drv.h}
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
+#include <linux/percpu.h>
#include <asm/e820.h>
#include <asm/processor.h>
mb();
}
+ EXPORT_SYMBOL_GPL(clflush_cache_range);
static void __cpa_flush_all(void *arg)
{
{
struct cpa_data alias_cpa;
unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
- unsigned long vaddr, remapped;
+ unsigned long vaddr;
int ret;
if (cpa->pfn >= max_pfn_mapped)
}
#endif
- /*
- * If the PMD page was partially used for per-cpu remapping,
- * the recycled area needs to be split and modified. Because
- * the area is always proper subset of a PMD page
- * cpa->numpages is guaranteed to be 1 for these areas, so
- * there's no need to loop over and check for further remaps.
- */
- remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
- if (remapped) {
- WARN_ON(cpa->numpages > 1);
- alias_cpa = *cpa;
- alias_cpa.vaddr = &remapped;
- alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
- ret = __change_page_attr_set_clr(&alias_cpa, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
{
struct cpa_data cpa;
int ret, cache, checkalias;
+ unsigned long baddr = 0;
/*
* Check, if we are requested to change a not supported
*/
WARN_ON_ONCE(1);
}
+ /*
+ * Save address for cache flush. *addr is modified in the call
+ * to __change_page_attr_set_clr() below.
+ */
+ baddr = *addr;
}
/* Must avoid aliasing mappings in the highmem code */
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
} else
- cpa_flush_range(*addr, numpages, cache);
+ cpa_flush_range(baddr, numpages, cache);
} else
cpa_flush_all(cache);
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define PREFIX "ACPI: "
+
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_INFO "info"
#define ACPI_BUTTON_FILE_STATE "state"
.release = single_release,
};
+ static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
+ static struct acpi_device *lid_device;
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
+ int acpi_lid_notifier_register(struct notifier_block *nb)
+ {
+ return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
+ }
+ EXPORT_SYMBOL(acpi_lid_notifier_register);
+
+ int acpi_lid_notifier_unregister(struct notifier_block *nb)
+ {
+ return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
+ }
+ EXPORT_SYMBOL(acpi_lid_notifier_unregister);
+
+ int acpi_lid_open(void)
+ {
+ acpi_status status;
+ unsigned long long state;
+
+ status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
+ &state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return !!state;
+ }
+ EXPORT_SYMBOL(acpi_lid_open);
+
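The three exports above form a small API for lid-state consumers. A rough usage sketch follows (the callback and its contents are illustrative, not part of this patch; only the register/unregister/open calls come from the code above):

	static int my_lid_notify(struct notifier_block *nb,
				 unsigned long state, void *data)
	{
		/* state is the raw _LID value: 0 = closed, nonzero = open */
		return NOTIFY_OK;
	}

	static struct notifier_block my_lid_nb = {
		.notifier_call = my_lid_notify,
	};

	acpi_lid_notifier_register(&my_lid_nb);
	if (acpi_lid_open() > 0)
		/* lid is open */;
	/* ... */
	acpi_lid_notifier_unregister(&my_lid_nb);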
static int acpi_lid_send_state(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
unsigned long long state;
acpi_status status;
+ int ret;
status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
if (ACPI_FAILURE(status))
/* input layer checks if event is redundant */
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
- return 0;
+
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+ if (ret == NOTIFY_DONE)
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+ device);
+ return ret;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
error = input_register_device(input);
if (error)
goto err_remove_fs;
- if (button->type == ACPI_BUTTON_TYPE_LID)
+ if (button->type == ACPI_BUTTON_TYPE_LID) {
acpi_lid_send_state(device);
+ /*
+ * This assumes there's only one lid device, or if there are
+ * more we only care about the last one...
+ */
+ lid_device = device;
+ }
if (device->wakeup.flags.valid) {
/* Button's GPE is run-wake GPE */
#include <linux/agp_backend.h>
#include "agp.h"
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+ #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+ #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
int resource_valid;
} intel_private;
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+ *ret = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+ return -EINVAL;
+ return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+ pci_unmap_page(intel_private.pcidev, dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+ struct sg_table st;
+
+ st.sgl = mem->sg_list;
+ st.orig_nents = st.nents = mem->page_count;
+
+ sg_free_table(&st);
+
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+ struct sg_table st;
+ struct scatterlist *sg;
+ int i;
+
+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ return -ENOMEM;
+
+ mem->sg_list = sg = st.sgl;
+
+ for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!mem->num_sg)) {
+ intel_agp_free_sglist(mem);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+ pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ intel_agp_free_sglist(mem);
+}
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ struct scatterlist *sg;
+ int i, j;
+
+ j = pg_start;
+
+ WARN_ON(!mem->num_sg);
+
+ if (mem->num_sg == mem->page_count) {
+ for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg), mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ } else {
+ /* sg may merge pages, but we have to separate
+ * per-page addr for GTT */
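+ /* e.g. one merged entry with sg_dma_len == 4 * PAGE_SIZE still
+ * needs four distinct GTT PTEs written in the loop below */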
+ unsigned int len, m;
+
+ for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+ len = sg_dma_len(sg) / PAGE_SIZE;
+ for (m = 0; m < len; m++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg) + m * PAGE_SIZE,
+ mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ int i, j;
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.gtt+j);
+ }
+
+ readl(intel_private.gtt+j-1);
+}
+
+#endif
+
static int intel_i810_fetch_size(void)
{
u32 smram_miscc;
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i],
- mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
}
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+ dma_addr_t addr, int type)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
/* Type checking must be done elsewhere */
return addr | bridge->driver->masks[type].mask;
}
if (!intel_private.i8xx_page)
return;
- /* make page uncached */
- map_page_into_agp(intel_private.i8xx_page);
-
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
intel_i830_fini_flush();
}
+ static void
+ do_wbinvd(void *null)
+ {
+ wbinvd();
+ }
+
+ /* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB. So, to push out whatever was
+ * previously sitting in that buffer, we just fill 1KB and clflush it,
+ * on the assumption that this evicts the old contents. It appears to work.
+ */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
unsigned int *pg = intel_private.i8xx_flush_page;
- int i;
- for (i = 0; i < 256; i += 2)
- *(pg + i) = i;
+ memset(pg, 0, 1024);
- wmb();
+ if (cpu_has_clflush) {
+ clflush_cache_range(pg, 1024);
+ } else {
+ if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ }
}
/* The intel i830 automatically initializes the agp aperture during POST.
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
intel_i9xx_setup_flush();
+#ifdef USE_PCI_DMA_API
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+#endif
+
return 0;
}
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
- int i, j, num_entries;
+ int num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
if ((pg_start + mem->page_count) > num_entries)
goto out_err;
- /* The i915 can't check the GTT for entries since its read only,
+ /* The i915 can't check the GTT for entries since it's read only;
* depend on the caller to make the correct offset decisions.
*/
if (!mem->is_flushed)
global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->pages[i], mask_type), intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
+ intel_agp_insert_sg_entries(mem, pg_start, mask_type);
agp_bridge->driver->tlb_flush(mem);
out:
* this conditional.
*/
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- struct page *page, int type)
+ dma_addr_t addr, int type)
{
- dma_addr_t addr = phys_to_gart(page_to_phys(page));
/* Shift high bits down */
addr |= (addr >> 28) & 0xf0;
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
case PCI_DEVICE_ID_INTEL_G41_HB:
+ case PCI_DEVICE_ID_INTEL_B43_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
};
static const struct agp_bridge_driver intel_i965_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
};
static const struct agp_bridge_driver intel_7505_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
};
static int find_gmch(u16 device)
"Q45/Q43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
"G45/G43", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+ "B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
- pci_restore_state(pdev);
-
- /* We should restore our graphics device's config space,
- * as host bridge (00:00) resumes before graphics device (02:00),
- * then our access to its pci space can work right.
- */
- if (intel_private.pcidev)
- pci_restore_state(intel_private.pcidev);
-
if (bridge->driver == &intel_generic_driver)
intel_configure();
else if (bridge->driver == &intel_850_driver)
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+ #include "i915_trace.h"
+#include <linux/vgaarb.h>
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
+ trace_i915_ring_wait_begin(dev);
+
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
- if (ring->space >= n)
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end(dev);
return 0;
+ }
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
}
+ trace_i915_ring_wait_end(dev);
return -EBUSY;
}
* how much was set aside so we can use it for our own purposes.
*/
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size)
+ uint32_t *preallocated_size,
+ uint32_t *start)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 tmp = 0;
return -1;
}
*preallocated_size = stolen - overhead;
+ *start = overhead;
return 0;
}
+ #define PTE_ADDRESS_MASK 0xfffff000
+ #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+ #define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+ #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+ #define PTE_MAPPING_TYPE_CACHED (3 << 1)
+ #define PTE_MAPPING_TYPE_MASK (3 << 1)
+ #define PTE_VALID (1 << 0)
+
+ /**
+ * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * @dev: drm device
+ * @gtt_addr: address to translate
+ *
+ * Some chip functions require allocations from stolen space but need the
+ * physical address of the memory in question. We use this routine
+ * to get a physical address suitable for register programming from a given
+ * GTT address.
+ */
+ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ unsigned long gtt_addr)
+ {
+ unsigned long *gtt;
+ unsigned long entry, phys;
+ int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+ int gtt_offset, gtt_size;
+
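+ /* On 965-class chips the GTT sits at an offset inside the MMIO
+ * BAR; everything older exposes it as a BAR of its own (3). */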
+ if (IS_I965G(dev)) {
+ if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ gtt_offset = 2*1024*1024;
+ gtt_size = 2*1024*1024;
+ } else {
+ gtt_offset = 512*1024;
+ gtt_size = 512*1024;
+ }
+ } else {
+ gtt_bar = 3;
+ gtt_offset = 0;
+ gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+ }
+
+ gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
+ gtt_size);
+ if (!gtt) {
+ DRM_ERROR("ioremap of GTT failed\n");
+ return 0;
+ }
+
+ entry = *(volatile u32 *)((char *)gtt + (gtt_addr / 1024));
+
+ DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+ /* Mask out these reserved bits on this hardware. */
+ if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+ IS_I945G(dev) || IS_I945GM(dev)) {
+ entry &= ~PTE_ADDRESS_MASK_HIGH;
+ }
+
+ /* If it's not a mapping type we know, then bail. */
+ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+ (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
+ iounmap(gtt);
+ return 0;
+ }
+
+ if (!(entry & PTE_VALID)) {
+ DRM_ERROR("bad GTT entry in stolen space\n");
+ iounmap(gtt);
+ return 0;
+ }
+
+ iounmap(gtt);
+
+ phys = (entry & PTE_ADDRESS_MASK) |
+ ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
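+ /* e.g. entry 0x801230f1: 0x80123000 from the low mask, plus
+ * 0xf0 << 28 = 0xf00000000, giving the 36-bit phys 0xf80123000 */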
+
+ DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+
+ return phys;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+ {
+ DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
+ DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+ }
+
+ static void i915_setup_compression(struct drm_device *dev, int size)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *compressed_llb;
+ unsigned long cfb_base, ll_base;
+
+ /* Leave 1M for line length buffer & misc. */
+ compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+ if (!compressed_fb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
+ if (!cfb_base) {
+ DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+ drm_mm_put_block(compressed_fb);
+ return;
+ }
+
+ if (!IS_GM45(dev)) {
+ compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+ 4096, 0);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
+ if (!ll_base) {
+ DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+ drm_mm_put_block(compressed_fb);
+ drm_mm_put_block(compressed_llb);
+ return;
+ }
+ }
+
+ dev_priv->cfb_size = size;
+
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ }
+
+ DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+ ll_base, size >> 20);
+ }
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+ intel_modeset_vga_set_state(dev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
static int i915_load_modeset_init(struct drm_device *dev,
+ unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+ DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
/* Let GEM Manage from end of prealloc space to end of aperture.
*
*/
i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
if (ret)
goto out;
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
+ i915_powersave) {
+ int cfb_size;
+
+ /* Try to get an 8M buffer... */
+ if (prealloc_size > (9*1024*1024))
+ cfb_size = 8*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
+ /* if we have > 1 VGA cards, then disable the i915 VGA resources */
+ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ if (ret)
+ goto destroy_ringbuffer;
+
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
dev->counters += 4;
"performance may suffer.\n");
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+ ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
if (ret)
goto out_iomapfree;
return ret;
}
+ /* Start out suspended */
+ dev_priv->mm.suspended = 1;
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev, prealloc_start,
+ prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
if (!IS_IGDNG(dev))
intel_opregion_init(dev, 0);
+ setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ (unsigned long) dev);
return 0;
out_workqueue_free:
struct drm_i915_private *dev_priv = dev->dev_private;
destroy_workqueue(dev_priv->wq);
+ del_timer_sync(&dev_priv->hangcheck_timer);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_irq_uninstall(dev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
}
if (dev->pdev->msi_enabled)
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
PIPE_B,
};
+ enum plane {
+ PLANE_A = 0,
+ PLANE_B,
+ };
+
#define I915_NUM_PIPE 2
/* Interface history:
struct timeval time;
};
+ struct drm_i915_display_funcs {
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ bool (*fbc_enabled)(struct drm_crtc *crtc);
+ void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*disable_fbc)(struct drm_device *dev);
+ int (*get_display_clock_speed)(struct drm_device *dev);
+ int (*get_fifo_size)(struct drm_device *dev, int plane);
+ void (*update_wm)(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size);
+ /* clock updates for mode set */
+ /* cursor updates */
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+ /* clock gating init */
+ };
+
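The struct above is the heart of the "split display functions by chip type" change: generation-specific hooks are bound once at init instead of being picked with IS_*() checks at every call site. A minimal sketch of how the table might be populated (intel_init_display is an assumed name; g4x_disable_fbc and i8xx_disable_fbc are the real hooks declared later in this diff):

	static void intel_init_display(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		if (IS_GM45(dev))
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		else if (I915_HAS_FBC(dev))
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
	}

Callers then invoke dev_priv->display.disable_fbc(dev) without knowing the chip generation.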
typedef struct drm_i915_private {
struct drm_device *dev;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ /* For hangcheck timer */
+ #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
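+ /* note: a raw jiffies count, so the real period is HZ-dependent
+ * (750ms at HZ=100, 75ms at HZ=1000) */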
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd;
+
bool cursor_needs_physical;
struct drm_mm vram;
+ unsigned long cfb_size;
+ unsigned long cfb_pitch;
+ int cfb_fence;
+ int cfb_plane;
+
int irq_enabled;
struct intel_opregion opregion;
unsigned int edp_support:1;
int lvds_ssc_freq;
+ struct notifier_block lid_notifier;
+
int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
struct work_struct error_work;
struct workqueue_struct *wq;
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
/* Register state */
+ bool suspended;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
+ /**
+ * Membership on list of all loaded devices, used to evict
+ * inactive buffers under memory pressure.
+ *
+ * Modifications should only be done whilst holding the
+ * shrink_list_lock spinlock.
+ */
+ struct list_head shrink_list;
+
/**
* List of objects currently involved in rendering from the
* ringbuffer.
* It prevents command submission from occurring and makes
* every pending request fail
*/
- int wedged;
+ atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
- /**
- * Required alignment for the object
- */
- uint32_t gtt_alignment;
+
/**
* Fake offset for use by mmap(2)
*/
* in an execbuffer object list.
*/
int in_execbuffer;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ int madv;
};
/**
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+ extern void i915_save_display(struct drm_device *dev);
+ extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
+ extern int i965_reset(struct drm_device *dev, u8 flags);
/* i915_irq.c */
+ void i915_hangcheck_elapsed(unsigned long data);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
+ bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+ void i915_gem_shrinker_init(void);
+ void i915_gem_shrinker_exit(void);
+
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+ extern void i8xx_disable_fbc(struct drm_device *dev);
+ extern void g4x_disable_fbc(struct drm_device *dev);
/**
* Lock test for when it's just for synchronization of ring access.
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
(dev)->pci_device == 0x0042 || \
(dev)->pci_device == 0x0046)
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
- #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+ #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+ #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+ #include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
- static int i915_gem_evict_something(struct drm_device *dev);
+ static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
+ static LIST_HEAD(shrink_list);
+ static DEFINE_SPINLOCK(shrink_list_lock);
+
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
return ret;
}
+ static inline gfp_t
+ i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
+ {
+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+ }
+
+ static inline void
+ i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
+ {
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+ }
+
+ static int
+ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+ {
+ int ret;
+
+ ret = i915_gem_object_get_pages(obj);
+
+ /* If we have insufficient memory to map in the pages, attempt
+ * to make some space by throwing out some old buffers.
+ */
+ if (ret == -ENOMEM) {
+ struct drm_device *dev = obj->dev;
+ gfp_t gfp;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
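+ /* Eviction released other objects' backing pages; clear
+ * __GFP_NORETRY so this second attempt can block in reclaim
+ * rather than failing fast again. */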
+ gfp = i915_gem_object_get_page_gfp_mask(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+ ret = i915_gem_object_get_pages(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp);
+ }
+
+ return ret;
+ }
+
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
+ if (ret)
+ goto unlock;
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
}
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ if (ret)
+ goto unlock;
}
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+ unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
case -EAGAIN:
return VM_FAULT_OOM;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
default:
- return VM_FAULT_NOPAGE;
+ return VM_FAULT_SIGBUS;
}
}
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
if (ret) {
args->offset = obj_priv->mmap_offset;
- obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
- /* Make sure the alignment is correct for fence regs etc */
- if (obj_priv->agp_mem &&
- (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
/*
* Pull it into the GTT so that we have a page list (makes the
* initial fault faster and any subsequent flushing possible).
*/
if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
int i;
BUG_ON(obj_priv->pages_refcount == 0);
+ BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
if (--obj_priv->pages_refcount != 0)
return;
if (obj_priv->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- for (i = 0; i < page_count; i++)
- if (obj_priv->pages[i] != NULL) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj_priv->madv == I915_MADV_DONTNEED)
+ obj_priv->dirty = 0;
+
+ for (i = 0; i < page_count; i++) {
+ if (obj_priv->pages[i] == NULL)
+ break;
+
+ if (obj_priv->dirty)
+ set_page_dirty(obj_priv->pages[i]);
+
+ if (obj_priv->madv == I915_MADV_WILLNEED)
mark_page_accessed(obj_priv->pages[i]);
- page_cache_release(obj_priv->pages[i]);
- }
+
+ page_cache_release(obj_priv->pages[i]);
+ }
obj_priv->dirty = 0;
drm_free_large(obj_priv->pages);
obj_priv->last_rendering_seqno = 0;
}
+ /* Immediately discard the backing storage */
+ static void
+ i915_gem_object_truncate(struct drm_gem_object *obj)
+ {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct inode *inode;
+
+ inode = obj->filp->f_path.dentry->d_inode;
+ if (inode->i_op->truncate)
+ inode->i_op->truncate(inode);
+
+ obj_priv->madv = __I915_MADV_PURGED;
+ }
+
+ static inline int
+ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+ {
+ return obj_priv->madv == I915_MADV_DONTNEED;
+ }
+
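+ /* Note the madv lifecycle: userspace toggles WILLNEED/DONTNEED,
+ * but once truncated the object stays at __I915_MADV_PURGED
+ * (see i915_gem_madvise_ioctl below). */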
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
}
}
- if (was_empty && !dev_priv->mm.suspended)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ if (!dev_priv->mm.suspended) {
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ if (was_empty)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ }
return seqno;
}
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ trace_i915_gem_request_retire(dev, request->seqno);
+
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
/**
* Returns true if seq1 is later than seq2.
*/
- static int
+ bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
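/* The signed subtraction makes this robust to 32-bit wraparound:
 * e.g. seq1 = 2, seq2 = 0xfffffffe gives (int32_t)(seq1 - seq2) == 4,
 * so seq1 is correctly "later" despite being numerically smaller. */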
return (int32_t)(seq1 - seq2) >= 0;
retiring_seqno = request->seqno;
if (i915_seqno_passed(seqno, retiring_seqno) ||
- dev_priv->mm.wedged) {
+ atomic_read(&dev_priv->mm.wedged)) {
i915_gem_retire_request(dev, request);
list_del(&request->list);
BUG_ON(seqno == 0);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
if (IS_IGDNG(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
i915_driver_irq_postinstall(dev);
}
+ trace_i915_gem_request_wait_begin(dev, seqno);
+
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
seqno) ||
- dev_priv->mm.wedged);
+ atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
+
+ trace_i915_gem_request_wait_end(dev, seqno);
}
- if (dev_priv->mm.wedged)
+ if (atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
if (ret && ret != -ERESTARTSYS)
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
+ trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+ invalidate_domains, flush_domains);
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
return -EINVAL;
}
+ /* blow away mappings if mapped through GTT */
+ i915_gem_release_mmap(obj);
+
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
return ret;
}
+ BUG_ON(obj_priv->active);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
obj_priv->agp_mem = NULL;
}
- BUG_ON(obj_priv->active);
-
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
i915_gem_object_put_pages(obj);
+ BUG_ON(obj_priv->pages_refcount);
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ if (i915_gem_object_is_purgeable(obj_priv))
+ i915_gem_object_truncate(obj);
+
+ trace_i915_gem_object_unbind(obj);
+
return 0;
}
+ static struct drm_gem_object *
+ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *best = NULL;
+ struct drm_gem_object *first = NULL;
+
+ /* Try to find the smallest clean object */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (!best || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ return best;
+ }
+ if (!first)
+ first = obj;
+ }
+ }
+
+ return best ? best : first;
+ }
+
static int
- i915_gem_evict_something(struct drm_device *dev)
+ i915_gem_evict_everything(struct drm_device *dev)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+ }
+
+ static int
+ i915_gem_evict_something(struct drm_device *dev, int min_size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
for (;;) {
+ i915_gem_retire_requests(dev);
+
/* If there's an inactive buffer available now, grab it
* and be done.
*/
- if (!list_empty(&dev_priv->mm.inactive_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
- BUG_ON(obj_priv->pin_count != 0);
+ obj = i915_gem_find_inactive_object(dev, min_size);
+ if (obj) {
+ struct drm_i915_gem_object *obj_priv;
+
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
+ obj_priv = obj->driver_private;
+ BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
/* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj);
- break;
+ return i915_gem_object_unbind(obj);
}
/* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
+ * things, wait for the next to finish and hopefully leave us
+ * a buffer to evict.
*/
if (!list_empty(&dev_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
ret = i915_wait_request(dev, request->seqno);
if (ret)
- break;
+ return ret;
- /* if waiting caused an object to become inactive,
- * then loop around and wait for it. Otherwise, we
- * assume that waiting freed and unbound something,
- * so there should now be some space in the GTT
- */
- if (!list_empty(&dev_priv->mm.inactive_list))
- continue;
- break;
+ continue;
}
/* If we didn't have anything on the request list but there
* will get moved to inactive.
*/
if (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ struct drm_i915_gem_object *obj_priv;
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- i915_add_request(dev, NULL, obj->write_domain);
+ /* Find an object that we can immediately reuse */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ obj = obj_priv->obj;
+ if (obj->size >= min_size)
+ break;
- obj = NULL;
- continue;
- }
+ obj = NULL;
+ }
- DRM_ERROR("inactive empty %d request empty %d "
- "flushing empty %d\n",
- list_empty(&dev_priv->mm.inactive_list),
- list_empty(&dev_priv->mm.request_list),
- list_empty(&dev_priv->mm.flushing_list));
- /* If we didn't do any of the above, there's nothing to be done
- * and we just can't fit it in.
- */
- return -ENOSPC;
- }
- return ret;
- }
+ if (obj != NULL) {
+ uint32_t seqno;
- static int
- i915_gem_evict_everything(struct drm_device *dev)
- {
- int ret;
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
+ if (seqno == 0)
+ return -ENOMEM;
- for (;;) {
- ret = i915_gem_evict_something(dev);
- if (ret != 0)
- break;
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+ }
+
+ /* If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ return i915_gem_evict_from_inactive_list(dev);
+ else
+ return i915_gem_evict_everything(dev);
}
- if (ret == -ENOSPC)
- return 0;
- return ret;
}
int
BUG_ON(obj_priv->pages != NULL);
obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
if (obj_priv->pages == NULL) {
- DRM_ERROR("Faled to allocate page list\n");
obj_priv->pages_refcount--;
return -ENOMEM;
}
page = read_mapping_page(mapping, i, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- DRM_ERROR("read_mapping_page failed: %d\n", ret);
i915_gem_object_put_pages(obj);
return ret;
}
else
i830_write_fence_reg(reg);
+ trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
return 0;
}
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- int page_count, ret;
+ bool retry_alloc = false;
+ int ret;
if (dev_priv->mm.suspended)
return -EBUSY;
+
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to bind a purgeable object\n");
+ return -EINVAL;
+ }
+
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
}
}
if (obj_priv->gtt_space == NULL) {
- bool lists_empty;
-
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
- if (lists_empty) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOSPC;
- }
-
- ret = i915_gem_evict_something(dev);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
return ret;
- }
+
goto search_free;
}
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask(obj,
+ i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
+ }
ret = i915_gem_object_get_pages(obj);
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask(obj,
+ i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
+ }
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
+
+ if (ret == -ENOMEM) {
+ /* first try to clear up some space from the GTT */
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret) {
+ /* now try to shrink everyone else */
+ if (!retry_alloc) {
+ retry_alloc = true;
+ goto search_free;
+ }
+
+ return ret;
+ }
+
+ goto search_free;
+ }
+
return ret;
}
- page_count = obj->size / PAGE_SIZE;
/* Create an AGP memory structure pointing at our pages, and bind it
* into the GTT.
*/
obj_priv->agp_mem = drm_agp_bind_pages(dev,
obj_priv->pages,
- page_count,
+ obj->size >> PAGE_SHIFT,
obj_priv->gtt_offset,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_put_pages(obj);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- return -ENOMEM;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ goto search_free;
}
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
return 0;
}
if (obj_priv->pages == NULL)
return;
- /* XXX: The 865 in particular appears to be weird in how it handles
- * cache flushing. We haven't figured it out, but the
- * clflush+agp_chipset_flush doesn't appear to successfully get the
- * data visible to the PGU, while wbinvd + agp_chipset_flush does.
- */
- if (IS_I865G(obj->dev)) {
- wbinvd();
- return;
- }
+ trace_i915_gem_object_clflush(obj);
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
+ uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
+ old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
+ uint32_t old_write_domain;
+
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/**
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
if (ret != 0)
return ret;
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
obj_priv->dirty = 1;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
+ uint32_t old_write_domain, old_read_domains;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
+ uint32_t old_read_domains;
BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
i915_gem_clflush_object(obj);
}
+ old_read_domains = obj->read_domains;
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
}
/**
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_read_domains;
int i, ret;
if (offset == 0 && size == obj->size)
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ old_read_domains = obj->read_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
+
return 0;
}
}
target_obj_priv = target_obj->driver_private;
+ #if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+ #endif
+
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
return -EINVAL;
}
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
+ /* Validate that the target is in a valid r/w GPU domain */
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
i915_gem_object_unpin(obj);
return -EINVAL;
}
-
if (reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
return -EINVAL;
}
- #if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
- #endif
-
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
continue;
}
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta, (int) target_obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
i915_verify_inactive(dev, __FILE__, __LINE__);
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
/* error other than GTT full, or we've already tried again */
if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to pin buffers %d\n", ret);
+ if (ret != -ERESTARTSYS) {
+ unsigned long long total_size = 0;
+ for (i = 0; i < args->buffer_count; i++)
+ total_size += object_list[i]->size;
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ pinned+1, args->buffer_count,
+ total_size, ret);
+ DRM_ERROR("%d objects [%d pinned], "
+ "%d object bytes [%d pinned], "
+ "%d/%d gtt bytes\n",
+ atomic_read(&dev->object_count),
+ atomic_read(&dev->pin_count),
+ atomic_read(&dev->object_memory),
+ atomic_read(&dev->pin_memory),
+ atomic_read(&dev->gtt_memory),
+ dev->gtt_total);
+ }
goto err;
}
/* evict everyone we can from the aperture */
ret = i915_gem_evict_everything(dev);
- if (ret)
+ if (ret && ret != -ENOSPC)
goto err;
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to bind: %d\n", ret);
+ if (ret)
return ret;
- }
}
/*
* Pre-965 chips need a fence register set up in order to
}
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
return i915_gem_ring_throttle(dev, file_priv);
}
+ int
+ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+ {
+ struct drm_i915_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+ args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+ return -EINVAL;
+ }
+
+ if (obj_priv->madv != __I915_MADV_PURGED)
+ obj_priv->madv = args->madv;
+
+ /* if the object is no longer bound, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj_priv) &&
+ obj_priv->gtt_space == NULL)
+ i915_gem_object_truncate(obj);
+
+ args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
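Since this introduces a new ioctl, a minimal userspace sketch may be useful. It assumes libdrm's drmIoctl() and the DRM_IOCTL_I915_GEM_MADVISE wrapper that pairs with this handler; bo_handle is a placeholder for a real GEM handle:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Tell the kernel a cached buffer's contents are expendable. */
static int bo_mark_purgeable(int fd, uint32_t bo_handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = bo_handle,
		.madv = I915_MADV_DONTNEED,
	};
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
}

/* Take the buffer back out of the cache; returns 0 if the pages were
 * purged meanwhile (contents must be regenerated), 1 if they were
 * retained, -1 on ioctl error. */
static int bo_reclaim(int fd, uint32_t bo_handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = bo_handle,
		.madv = I915_MADV_WILLNEED,
	};
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;
	return madv.retained;
}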
int i915_gem_init_object(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
INIT_LIST_HEAD(&obj_priv->fence_list);
+ obj_priv->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(obj);
return 0;
}
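For reference, the i915_gem_object_create tracepoint fired above is declared with the standard TRACE_EVENT() machinery; a sketch of such a definition follows (field layout illustrative, the real declarations live in i915_trace.h for this series):

TRACE_EVENT(i915_gem_object_create,
	    TP_PROTO(struct drm_gem_object *obj),
	    TP_ARGS(obj),
	    TP_STRUCT__entry(
			     __field(struct drm_gem_object *, obj)
			     __field(u32, size)
			     ),
	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->size = obj->size;
			   ),
	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);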
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ trace_i915_gem_object_destroy(obj);
+
while (obj_priv->pin_count > 0)
i915_gem_object_unpin(obj);
i915_gem_object_unbind(obj);
- i915_gem_free_mmap_offset(obj);
+ if (obj_priv->mmap_offset)
+ i915_gem_free_mmap_offset(obj);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
kfree(obj->driver_private);
}
- /** Unbinds all objects that are on the given buffer list. */
+ /** Unbinds all inactive objects. */
static int
- i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+ i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- while (!list_empty(head)) {
- obj_priv = list_first_entry(head,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Pinned object in unbind list\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
+ obj = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->obj;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
- DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
}
}
-
return 0;
}
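The list_first_entry(...)->obj chain above leans on the back-pointer layout of this era's GEM objects; an abridged sketch, keeping only the fields used here:

struct drm_i915_gem_object {
	/* Back-pointer to the generic GEM object. */
	struct drm_gem_object *obj;

	/* Link into one of the active/flushing/inactive mm lists;
	 * list_first_entry() on those lists yields this struct, and
	 * ->obj recovers the drm_gem_object to unbind. */
	struct list_head list;

	/* ... remaining fields omitted ... */
};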
* We need to replace this with a semaphore, or something.
*/
dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
/* Cancel the retire work handler, wait for it to finish if running
*/
if (last_seqno == cur_seqno) {
if (stuck++ > 100) {
DRM_ERROR("hardware wedged\n");
- dev_priv->mm.wedged = 1;
+ atomic_set(&dev_priv->mm.wedged, 1);
DRM_WAKEUP(&dev_priv->irq_queue);
break;
}
i915_gem_retire_requests(dev);
spin_lock(&dev_priv->mm.active_list_lock);
- if (!dev_priv->mm.wedged) {
+ if (!atomic_read(&dev_priv->mm.wedged)) {
/* Active and flushing should now be empty as we've
* waited for a sequence higher than any pending execbuffer
*/
* the GPU domains and just stuff them onto inactive.
*/
while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ ret = i915_gem_evict_from_inactive_list(dev);
WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
+ atomic_set(&dev_priv->mm.wedged, 0);
}
mutex_lock(&dev->struct_mutex);
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret;
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
-
- return ret;
+ return i915_gem_idle(dev);
}
void
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
+ spin_lock(&shrink_list_lock);
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
list_del_init(i915_file_priv->mm.request_list.next);
mutex_unlock(&dev->struct_mutex);
}
+
+ static int
+ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+ {
+ drm_i915_private_t *dev_priv, *next_dev;
+ struct drm_i915_gem_object *obj_priv, *next_obj;
+ int cnt = 0;
+ int would_deadlock = 1;
+
+ /* "fast-path" to count number of available objects */
+ if (nr_to_scan == 0) {
+ spin_lock(&shrink_list_lock);
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (mutex_trylock(&dev->struct_mutex)) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ }
+ spin_unlock(&shrink_list_lock);
+
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ }
+
+ spin_lock(&shrink_list_lock);
+
+ /* first scan for clean buffers */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))

+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ i915_gem_retire_requests(dev);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (i915_gem_object_is_purgeable(obj_priv)) {
+ i915_gem_object_unbind(obj_priv->obj);
+ if (--nr_to_scan <= 0)
+ break;
+ }
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+
+ if (nr_to_scan <= 0)
+ break;
+ }
+
+ /* second pass, evict/count anything still on the inactive list */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (nr_to_scan > 0) {
+ i915_gem_object_unbind(obj_priv->obj);
+ nr_to_scan--;
+ } else
+ cnt++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+ }
+
+ spin_unlock(&shrink_list_lock);
+
+ if (would_deadlock)
+ return -1;
+ else if (cnt > 0)
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ else
+ return 0;
+ }
+
+ static struct shrinker shrinker = {
+ .shrink = i915_gem_shrink,
+ .seeks = DEFAULT_SEEKS,
+ };
+
+ __init void
+ i915_gem_shrinker_init(void)
+ {
+ register_shrinker(&shrinker);
+ }
+
+ __exit void
+ i915_gem_shrinker_exit(void)
+ {
+ unregister_shrinker(&shrinker);
+ }
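For context, a minimal sketch of the shrinker contract the code above implements (the pre-shrink_control callback style; count_my_objects() and try_reclaim() are placeholders):

/* nr_to_scan == 0 is a query pass: report cache size only. */
static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan == 0)
		return (count_my_objects() / 100) * sysctl_vfs_cache_pressure;

	/* Reclaim pass: free up to nr_to_scan objects; returning -1
	 * tells the VM the needed locks could not be taken. */
	if (!try_reclaim(nr_to_scan))
		return -1;

	return (count_my_objects() / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker example_shrinker = {
	.shrink = example_shrink,
	.seeks = DEFAULT_SEEKS,
};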
* fb aperture size and the amount of pre-reserved memory.
*/
#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define INTEL_GMCH_ENABLED 0x4
#define INTEL_GMCH_MEM_MASK 0x1
#define INTEL_GMCH_MEM_64M 0x1
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
+ #define GDRST 0xc0
+ #define GDRST_FULL (0<<2)
+ #define GDRST_RENDER (1<<2)
+ #define GDRST_MEDIA (3<<2)
/* VGA stuff */
#define FBC_CTL_PLANEA (0<<0)
#define FBC_CTL_PLANEB (1<<0)
#define FBC_FENCE_OFF 0x0321b
+ #define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
+ /* Framebuffer compression for GM45+ */
+ #define DPFC_CB_BASE 0x3200
+ #define DPFC_CONTROL 0x3208
+ #define DPFC_CTL_EN (1<<31)
+ #define DPFC_CTL_PLANEA (0<<30)
+ #define DPFC_CTL_PLANEB (1<<30)
+ #define DPFC_CTL_FENCE_EN (1<<29)
+ #define DPFC_SR_EN (1<<10)
+ #define DPFC_CTL_LIMIT_1X (0<<6)
+ #define DPFC_CTL_LIMIT_2X (1<<6)
+ #define DPFC_CTL_LIMIT_4X (2<<6)
+ #define DPFC_RECOMP_CTL 0x320c
+ #define DPFC_RECOMP_STALL_EN (1<<27)
+ #define DPFC_RECOMP_STALL_WM_SHIFT (16)
+ #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+ #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+ #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+ #define DPFC_STATUS 0x3210
+ #define DPFC_INVAL_SEG_SHIFT (16)
+ #define DPFC_INVAL_SEG_MASK (0x07ff0000)
+ #define DPFC_COMP_SEG_SHIFT (0)
+ #define DPFC_COMP_SEG_MASK (0x000003ff)
+ #define DPFC_STATUS2 0x3214
+ #define DPFC_FENCE_YOFF 0x3218
+ #define DPFC_CHICKEN 0x3224
+ #define DPFC_HT_MODIFY (1<<31)
+
/*
* GPIO regs
*/
#define PF_ENABLE (1<<31)
#define PFA_WIN_SZ 0x68074
#define PFB_WIN_SZ 0x68874
+ #define PFA_WIN_POS 0x68070
+ #define PFB_WIN_POS 0x68870
/* legacy palette */
#define LGC_PALETTE_A 0x4a000
* Eric Anholt <eric@anholt.net>
*/
+ #include <linux/module.h>
+ #include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include "drmP.h"
refclk, best_clock);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
mdelay(20);
}
+ /* Parameters have changed; write them to the hardware and (re)enable FBC */
+ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+
+ if (fb->pitch < dev_priv->cfb_pitch)
+ dev_priv->cfb_pitch = fb->pitch;
+
+ /* FBC_CTL wants 64B units */
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+ plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ fbc_ctl |= dev_priv->cfb_fence;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ }
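Worked numbers for the pitch programming above (framebuffer size illustrative):

/*
 * A 1280x1024 XRGB scanout has fb->pitch = 5120 bytes.  After
 * clamping against cfb_size / FBC_LL_SIZE, FBC_CTL's stride field
 * gets (5120 / 64) - 1 = 79, since the register counts 64-byte
 * units and is biased by one.
 */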
+
+ void i8xx_disable_fbc(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
+ ; /* nothing */
+
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG("disabled FBC\n");
+ }
+
+ static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+ }
+
+ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
+ DPFC_CTL_PLANEB);
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+ }
+
+ void g4x_disable_fbc(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG("disabled FBC\n");
+ }
+
+ static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+ }
+
+ /**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @crtc: CRTC to point the compressor at
+ * @mode: mode in use
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+ static void intel_update_fbc(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane;
+
+ if (!i915_powersave)
+ return;
+
+ if (!dev_priv->display.fbc_enabled ||
+ !dev_priv->display.enable_fbc ||
+ !dev_priv->display.disable_fbc)
+ return;
+
+ if (!crtc->fb)
+ return;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj_priv = intel_fb->obj->driver_private;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ if (intel_fb->obj->size > dev_priv->cfb_size) {
+ DRM_DEBUG("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG("mode incompatible with compression, disabling\n");
+ goto out_disable;
+ }
+ if ((mode->hdisplay > 2048) ||
+ (mode->vdisplay > 1536)) {
+ DRM_DEBUG("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+ DRM_DEBUG("plane not 0, disabling compression\n");
+ goto out_disable;
+ }
+ if (obj_priv->tiling_mode != I915_TILING_X) {
+ DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+ goto out_disable;
+ }
+
+ if (dev_priv->display.fbc_enabled(crtc)) {
+ /* We can re-enable it in this case, but need to update pitch */
+ if (fb->pitch > dev_priv->cfb_pitch)
+ dev_priv->display.disable_fbc(dev);
+ if (obj_priv->fence_reg != dev_priv->cfb_fence)
+ dev_priv->display.disable_fbc(dev);
+ if (plane != dev_priv->cfb_plane)
+ dev_priv->display.disable_fbc(dev);
+ }
+
+ if (!dev_priv->display.fbc_enabled(crtc)) {
+ /* Now try to turn it back on if possible */
+ dev_priv->display.enable_fbc(crtc, 500);
+ }
+
+ return;
+
+ out_disable:
+ DRM_DEBUG("unsupported config, disabling FBC\n");
+ /* Multiple disables should be harmless */
+ if (dev_priv->display.fbc_enabled(crtc))
+ dev_priv->display.disable_fbc(dev);
+ }
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
unsigned long Start, Offset;
- int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr, alignment;
int ret;
return 0;
}
- switch (pipe) {
+ switch (plane) {
case 0:
case 1:
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
I915_READ(dspbase);
}
+ if (IS_I965G(dev) || plane == 0)
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_wait_for_vblank(dev);
if (old_fb) {
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
+ int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
}
}
+ /* Enable panel fitting for LVDS */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(pf_ctl_reg);
+ I915_WRITE(pf_ctl_reg, temp | PF_ENABLE);
+
+ /* currently full aspect */
+ I915_WRITE(pf_win_pos, 0);
+
+ I915_WRITE(pf_win_size,
+ (dev_priv->panel_fixed_mode->hdisplay << 16) |
+ (dev_priv->panel_fixed_mode->vdisplay));
+ }
+
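A quick worked value for the window programming above (panel size illustrative):

/*
 * For a 1024x768 panel, pf_win_size is written as
 * (1024 << 16) | 768 = 0x04000300, i.e. the whole panel area,
 * since only full-aspect scaling is handled at this point.
 */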
/* Enable CPU pipe */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) == 0) {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
intel_crtc_load_lut(crtc);
+ if (IS_I965G(dev) || plane == 0)
+ intel_update_fbc(crtc, &crtc->mode);
+
/* Give the overlay scaler a chance to enable if it's on this pipe */
//intel_crtc_dpms_video(crtc, true); TODO
intel_update_watermarks(dev);
/* Give the overlay scaler a chance to disable if it's on this pipe */
//intel_crtc_dpms_video(crtc, FALSE); TODO
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
/* Disable the VGA plane that we never use */
i915_disable_vga(dev);
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool enabled;
- if (IS_IGDNG(dev))
- igdng_crtc_dpms(crtc, mode);
- else
- i9xx_crtc_dpms(crtc, mode);
+ dev_priv->display.dpms(crtc, mode);
intel_crtc->dpms_mode = mode;
return true;
}
+ static int i945_get_display_clock_speed(struct drm_device *dev)
+ {
+ return 400000;
+ }
- /** Returns the core display clock speed for i830 - i945 */
- static int intel_get_core_clock_speed(struct drm_device *dev)
+ static int i915_get_display_clock_speed(struct drm_device *dev)
{
+ return 333000;
+ }
- /* Core clock values taken from the published datasheets.
- * The 830 may go up to 166 Mhz, which we should check.
- */
- if (IS_I945G(dev))
- return 400000;
- else if (IS_I915G(dev))
- return 333000;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
- return 200000;
- else if (IS_I915GM(dev)) {
- u16 gcfgc = 0;
+ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+ {
+ return 200000;
+ }
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+ static int i915gm_get_display_clock_speed(struct drm_device *dev)
+ {
+ u16 gcfgc = 0;
- if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
- return 133000;
- else {
- switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
- case GC_DISPLAY_CLOCK_333_MHZ:
- return 333000;
- default:
- case GC_DISPLAY_CLOCK_190_200_MHZ:
- return 190000;
- }
- }
- } else if (IS_I865G(dev))
- return 266000;
- else if (IS_I855(dev)) {
- u16 hpllcc = 0;
- /* Assume that the hardware is in the high speed state. This
- * should be the default.
- */
- switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
- case GC_CLOCK_133_200:
- case GC_CLOCK_100_200:
- return 200000;
- case GC_CLOCK_166_250:
- return 250000;
- case GC_CLOCK_100_133:
- return 133000;
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
}
- } else /* 852, 830 */
+ }
+ }
+
+ static int i865_get_display_clock_speed(struct drm_device *dev)
+ {
+ return 266000;
+ }
+
+ static int i855_get_display_clock_speed(struct drm_device *dev)
+ {
+ u16 hpllcc = 0;
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
return 133000;
+ }
+
+ /* Shouldn't happen */
+ return 0;
+ }
- return 0; /* Silence gcc warning */
+ static int i830_get_display_clock_speed(struct drm_device *dev)
+ {
+ return 133000;
}
/**
{
long entries_required, wm_size;
- entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000;
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * Clocks go from a few thousand to several hundred thousand kHz;
+ * latency is usually a few thousand ns.
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
entries_required /= wm->cacheline_size;
DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- break;
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
}
- if (i >= ARRAY_SIZE(cxsr_latency_table)) {
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
- return NULL;
- }
- return latency;
+
+ DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
}
static void igd_disable_cxsr(struct drm_device *dev)
*/
static const int latency_ns = 5000;
- static int intel_get_fifo_size(struct drm_device *dev, int plane)
+ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (IS_I9XX(dev)) {
- if (plane == 0)
- size = dsparb & 0x7f;
- else
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
- (dsparb & 0x7f);
- } else if (IS_I85X(dev)) {
- if (plane == 0)
- size = dsparb & 0x1ff;
- else
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
- (dsparb & 0x1ff);
- size >>= 1; /* Convert to cachelines */
- } else if (IS_845G(dev)) {
+ if (plane == 0)
size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
- } else {
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
- }
+ else
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
+ (dsparb & 0x7f);
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+ }
+
+ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ if (plane == 0)
+ size = dsparb & 0x1ff;
+ else
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
+ (dsparb & 0x1ff);
+ size >>= 1; /* Convert to cachelines */
DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
size);
return size;
}
- static void g4x_update_wm(struct drm_device *dev)
+ static int i845_get_fifo_size(struct drm_device *dev, int plane)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+ }
+
+ static int i830_get_fifo_size(struct drm_device *dev, int plane)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+ }
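A worked DSPARB decode for the i9xx case above (assuming DSPARB_CSTART_SHIFT == 7, as in this era's i915_reg.h):

/*
 * dsparb = (96 << 7) | 64 = 0x3040 splits a 96-entry FIFO as:
 *   plane A: 0x3040 & 0x7f = 64 entries
 *   plane B: ((0x3040 >> 7) & 0x7f) - 64 = 96 - 64 = 32 entries
 */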
+
+ static void g4x_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fw_blc_self = I915_READ(FW_BLC_SELF);
I915_WRITE(FW_BLC_SELF, fw_blc_self);
}
- static void i965_update_wm(struct drm_device *dev)
+ static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
cacheline_size = planea_params.cacheline_size;
/* Update per-plane FIFO sizes */
- planea_params.fifo_size = intel_get_fifo_size(dev, 0);
- planeb_params.fifo_size = intel_get_fifo_size(dev, 1);
+ planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
planea_wm = intel_calculate_wm(planea_clock, &planea_params,
pixel_size, latency_ns);
I915_WRITE(FW_BLC2, fwater_hi);
}
- static void i830_update_wm(struct drm_device *dev, int planea_clock,
- int pixel_size)
+ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
+ int unused2, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
int planea_wm;
- i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0);
+ i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
pixel_size, latency_ns);
*/
static void intel_update_watermarks(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
else if (IS_IGD(dev))
igd_disable_cxsr(dev);
- if (IS_G4X(dev))
- g4x_update_wm(dev);
- else if (IS_I965G(dev))
- i965_update_wm(dev);
- else if (IS_I9XX(dev) || IS_MOBILE(dev))
- i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
- pixel_size);
- else
- i830_update_wm(dev, planea_clock, pixel_size);
+ dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
+ sr_hdisplay, pixel_size);
}
static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int fp_reg = (pipe == 0) ? FPA0 : FPB0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int refclk, num_outputs = 0;
intel_clock_t clock, reduced_clock;
enable color space conversion */
if (!IS_IGDNG(dev)) {
if (pipe == 0)
- dspcntr |= DISPPLANE_SEL_PIPE_A;
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
* XXX: No double-wide on 915GM pipe B. Is that the only reason for the
* pipe == 0 check?
*/
- if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
+ if (mode->clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
pipeconf |= PIPEACONF_DOUBLE_WIDE;
else
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
udelay(150);
if (IS_I965G(dev) && !IS_IGDNG(dev)) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+ if (is_sdvo) {
+ sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ } else
+ I915_WRITE(dpll_md_reg, 0);
} else {
/* write it again -- the BIOS does, after all */
I915_WRITE(dpll_reg, dpll);
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ if (IS_I965G(dev) || plane == 0)
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
struct drm_gem_object *bo;
struct drm_i915_gem_object *obj_priv;
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
uint32_t temp = I915_READ(control);
i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(intel_crtc->cursor_bo);
}
+
+ if (IS_I965G(dev) || plane == 0)
+ intel_update_fbc(crtc, &crtc->mode);
+
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
intel_crtc->lut_b[i] = i;
}
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+ if (IS_MOBILE(dev) && IS_I9XX(dev) && !IS_I965G(dev)) {
+ DRM_DEBUG("swapping pipes & planes for FBC\n");
+ intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ }
+
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
}
+ /* Set up chip specific display functions */
+ static void intel_init_display(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* We always want a DPMS function */
+ if (IS_IGDNG(dev))
+ dev_priv->display.dpms = igdng_crtc_dpms;
+ else
+ dev_priv->display.dpms = i9xx_crtc_dpms;
+
+ /* Only mobile has FBC, leave pointers NULL for other chips */
+ if (IS_MOBILE(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* Returns the core display clock speed */
+ if (IS_I945G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915_get_display_clock_speed;
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i9xx_misc_get_display_clock_speed;
+ else if (IS_I915GM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915gm_get_display_clock_speed;
+ else if (IS_I865G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i865_get_display_clock_speed;
+ else if (IS_I855(dev))
+ dev_priv->display.get_display_clock_speed =
+ i855_get_display_clock_speed;
+ else /* 852, 830 */
+ dev_priv->display.get_display_clock_speed =
+ i830_get_display_clock_speed;
+
+ /* For FIFO watermark updates */
+ if (IS_G4X(dev))
+ dev_priv->display.update_wm = g4x_update_wm;
+ else if (IS_I965G(dev))
+ dev_priv->display.update_wm = i965_update_wm;
+ else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else {
+ if (IS_I85X(dev))
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ else if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ dev_priv->display.update_wm = i830_update_wm;
+ }
+ }
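For reference, the function-pointer table these assignments populate looks roughly like this (abridged sketch reconstructed from the call sites above; the authoritative definition belongs in i915_drv.h for this series):

struct drm_i915_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	bool (*fbc_enabled)(struct drm_crtc *crtc);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev, int planea_clock,
			  int planeb_clock, int sr_hdisplay,
			  int pixel_size);
};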
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ intel_init_display(dev);
+
if (IS_I965G(dev)) {
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
mutex_unlock(&dev->struct_mutex);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
drm_mode_config_cleanup(dev);
}
return &intel_output->enc;
}
+
+/*
+ * Set VGA decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 gmch_ctrl;
+
+ pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ return 0;
+}
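A sketch of the intended caller, the VGA-arbiter decode hook registered from i915_dma.c in this series (function name and returned resource flags as assumed):

static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}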
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
+ #include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#define INTEL_LVDS_CLONE_BIT 14
#define INTEL_DVO_TMDS_CLONE_BIT 15
#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
struct intel_crtc {
struct drm_crtc base;
- int pipe;
- int plane;
+ enum pipe pipe;
+ enum plane plane;
struct drm_gem_object *cursor_bo;
uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
struct drm_mode_fb_cmd *mode_cmd,
struct drm_framebuffer **fb,
struct drm_gem_object *obj);
+
#endif /* __INTEL_DRV_H__ */