*
*/
+#include <mach/hardware.h>
#include <asm/hardware/clps7111.h>
.macro addruart, rx, tmp
unsigned long __pfn_to_bus(unsigned long pfn)
{
- return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET));
+ return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
}
EXPORT_SYMBOL(__pfn_to_bus);
*
*/
- .equ io_virt, IO_BASE
- .equ io_phys, IO_START
+#include <mach/hardware.h>
+
+ .equ io_virt, IO_VIRT
+ .equ io_phys, IO_PHYS
.macro addruart, rx, tmp
mrc p15, 0, \rx, c1, c0
},
};
-void qnap_tsx1x_register_flash(void)
+void __init qnap_tsx1x_register_flash(void)
{
spi_register_board_info(qnap_tsx1x_spi_slave_info,
ARRAY_SIZE(qnap_tsx1x_spi_slave_info));
#ifndef __ARCH_KIRKWOOD_TSX1X_COMMON_H
#define __ARCH_KIRKWOOD_TSX1X_COMMON_H
-extern void qnap_tsx1x_register_flash(void);
+extern void __init qnap_tsx1x_register_flash(void);
extern void qnap_tsx1x_power_off(void);
#endif
* the Free Software Foundation.
*/
#include <mach/hardware.h>
+#include <asm/memory.h>
#include <mach/regs-board-a9m9750dev.h>
/* nothing */
}
+static int timeout;
+
static void putc_ns9360(char c, void __iomem *base)
{
- static int t = 0x10000;
do {
- if (t)
- --t;
+ if (timeout)
+ --timeout;
if (__raw_readl(base + 8) & (1 << 3)) {
__raw_writeb(c, base + 16);
- t = 0x10000;
+ timeout = 0x10000;
break;
}
- } while (t);
+ } while (timeout);
}
static void putc_a9m9750dev(char c, void __iomem *base)
{
- static int t = 0x10000;
do {
- if (t)
- --t;
+ if (timeout)
+ --timeout;
if (__raw_readb(base + 5) & (1 << 5)) {
__raw_writeb(c, base);
- t = 0x10000;
+ timeout = 0x10000;
break;
}
- } while (t);
+ } while (timeout);
}
static void putc_ns921x(char c, void __iomem *base)
{
- static int t = 0x10000;
do {
- if (t)
- --t;
+ if (timeout)
+ --timeout;
if (!(__raw_readl(base) & (1 << 11))) {
__raw_writeb(c, base + 0x0028);
- t = 0x10000;
+ timeout = 0x10000;
break;
}
- } while (t);
+ } while (timeout);
}
#define MSCS __REG(0xA0900184)
static void autodetect(void (**putc)(char, void __iomem *), void __iomem **base)
{
+ timeout = 0x10000;
if (((__raw_readl(MSCS) >> 16) & 0xfe) == 0x00) {
/* ns9360 or ns9750 */
if (NS9360_UART_ENABLED(NS9360_UARTA)) {
#include <mach/colibri.h>
#include <mach/ohci.h>
#include <mach/pxafb.h>
+#include <mach/audio.h>
#include "generic.h"
#include "devices.h"
static inline void colibri_pxa300_init_lcd(void) {}
#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
-#if defined(SND_AC97_CODEC) || defined(SND_AC97_CODEC_MODULE)
+#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
GPIO24_AC97_SYSCLK,
GPIO23_AC97_nACRESET,
static struct pxamci_platform_data corgi_mci_platform_data = {
.detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .gpio_card_detect = -1,
+ .gpio_card_detect = CORGI_GPIO_nSD_DETECT,
.gpio_card_ro = CORGI_GPIO_nSD_WP,
.gpio_power = CORGI_GPIO_SD_PWR,
};
static u32 mdrefr_dri(unsigned int freq)
{
- u32 dri = 0;
+ u32 interval = freq * SDRAM_TREF / sdram_rows;
- if (cpu_is_pxa25x())
- dri = ((freq * SDRAM_TREF) / (sdram_rows * 32));
- if (cpu_is_pxa27x())
- dri = ((freq * SDRAM_TREF) / (sdram_rows - 31)) / 32;
- return dri;
+ return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
/* find a valid frequency point */
EXPORT_SYMBOL(pxa27x_clear_otgph);
static unsigned long ac97_reset_config[] = {
- GPIO95_AC97_nRESET,
- GPIO95_GPIO,
- GPIO113_AC97_nRESET,
GPIO113_GPIO,
+ GPIO113_AC97_nRESET,
+ GPIO95_GPIO,
+ GPIO95_AC97_nRESET,
};
void pxa27x_assert_ac97reset(int reset_gpio, int on)
strb \rd, [\rx]
.endm
+ .macro waituart,rd,rx
+ .endm
+
.macro busyuart,rd,rx
mov \rd, #0
1001: add \rd, \rd, #1
DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL),
DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL),
DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL),
- DEF_CLKLOOK(&clk_adc, "nuc900-adc", NULL),
+ DEF_CLKLOOK(&clk_adc, "nuc900-ts", NULL),
DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL),
DEF_CLKLOOK(&clk_ext, NULL, "ext"),
DEF_CLKLOOK(&clk_timer0, NULL, "timer0"),
.macro addruart, rx
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- moveq \rx, =SPEAR_DBG_UART_BASE @ Physical base
- movne \rx, =VA_SPEAR_DBG_UART_BASE @ Virtual base
+ moveq \rx, #SPEAR_DBG_UART_BASE @ Physical base
+ movne \rx, #VA_SPEAR_DBG_UART_BASE @ Virtual base
.endm
.macro senduart, rd, rx
/* advance table_gfn when emulating 1gb pages with 4k */
if (delta == 0)
table_gfn += PT_INDEX(addr, level);
+ access &= gw->pte_access;
} else {
direct = 0;
table_gfn = gw->table_gfn[level - 2];
r = -ENOMEM;
size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
- entries = vmalloc(size);
+ entries = kmalloc(size, GFP_KERNEL);
if (!entries)
goto out;
r = n;
out_free:
- vfree(entries);
+ kfree(entries);
out:
return r;
}
/* G33's GTT size defined in gmch_ctrl */
pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ size = 512;
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
size = 1024;
break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
+ case I830_GMCH_GMS_STOLEN_8192:
+ size = 8*1024;
break;
default:
dev_info(&agp_bridge->dev->dev,
"unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+ (gmch_ctrl & I830_GMCH_GMS_MASK));
size = 512;
}
} else {
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
- return tpm_pm_resume(&dev->dev);
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+ int ret;
+
+ ret = tpm_pm_resume(&dev->dev);
+ if (!ret)
+ tpm_continue_selftest(chip);
+
+ return ret;
}
static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
if (devno == 0)
return -ENODEV;
- i7core_printk(KERN_ERR,
+ i7core_printk(KERN_INFO,
"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
dev_descr->dev, dev_descr->func,
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
case FBC_NOT_TILED:
seq_printf(m, "scanout buffer not tiled");
break;
+ case FBC_MULTIPLE_PIPES:
+ seq_printf(m, "multiple pipes are enabled");
+ break;
default:
seq_printf(m, "unknown reason");
}
struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb);
- if (!IS_GM45(dev))
+ if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
}
FBC_MODE_TOO_LARGE, /* mode too large for compression */
FBC_BAD_PLANE, /* fbc not supported on plane */
FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
};
enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */
};
+#define QUIRK_PIPEA_FORCE (1<<0)
+
struct intel_fbdev;
typedef struct drm_i915_private {
/* PCH chipset type */
enum intel_pch pch_type;
+ unsigned long quirks;
+
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
return ret;
}
+
int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv,
unsigned long long total_size = 0;
int num_fences = 0;
for (i = 0; i < args->buffer_count; i++) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
total_size += object_list[i]->size;
num_fences +=
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
intel_clock_t clock;
int max_n;
bool found;
- /* approximately equals target * 0.00488 */
- int err_most = (target >> 8) + (target >> 10);
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv;
+ struct drm_crtc *tmp_crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
+ int crtcs_enabled = 0;
+
+ DRM_DEBUG_KMS("\n");
if (!i915_powersave)
return;
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
+ * - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled)
+ crtcs_enabled++;
+ }
+ DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+ if (crtcs_enabled > 1) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
}
}
-static int
+int
intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
intel_wait_for_vblank(dev);
}
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipeconf_reg == PIPEACONF &&
+ (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ goto skip_pipe_off;
+
/* Next, disable display pipes */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
}
-
+ skip_pipe_off:
/* Wait for the clocks to turn off. */
udelay(150);
break;
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
}
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
if (dev_priv->lvds_dither) {
if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
pipeconf |= PIPE_DITHER_TYPE_ST01;
} else
lvds |= LVDS_ENABLE_DITHER;
DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ PANEL_UNLOCK_REGS);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ PANEL_UNLOCK_REGS);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
struct drm_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- unsigned long flags;
+ unsigned long flags, offset;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc;
u32 flip_mask;
while (I915_READ(ISR) & flip_mask)
;
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = obj_priv->gtt_offset;
+ offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+
BEGIN_LP_RING(4);
if (IS_I965G(dev)) {
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING(offset | obj_priv->tiling_mode);
pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(offset);
OUT_RING(MI_NOOP);
}
ADVANCE_LP_RING();
}
}
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+ DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+};
+
+struct intel_quirk intel_quirks[] = {
+ /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+ { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+ /* HP Mini needs pipe A force quirk (LP: #322104) */
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+ /* Thinkpad R31 needs pipe A force quirk */
+ { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
+ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+ { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+ /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
+ { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
+ /* ThinkPad X40 needs pipe A force quirk */
+
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+ /* 855 & before need to leave pipe A & dpll A up */
+ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+ struct pci_dev *d = dev->pdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ intel_init_quirks(dev);
+
intel_init_display(dev);
if (IS_I965G(dev)) {
}
}
+static void ironlake_edp_panel_on (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
+
+ pp_status = I915_READ(PCH_PP_STATUS);
+ if (pp_status & PP_ON)
+ return;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
+
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+
+ pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
+static void ironlake_edp_panel_off (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~POWER_TARGET_ON;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
+
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel off wait timed out\n");
+
+ /* Make sure VDD is enabled so DP AUX will work */
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_encoder, dp_priv->DP);
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_encoder)) {
ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
+ }
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_encoder)) {
+ ironlake_edp_panel_on(dev);
ironlake_edp_backlight_on(dev);
+ }
}
}
dp_priv->dpms_mode = mode;
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_gem_object *obj);
+
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, 64*1024);
+ ret = intel_pin_and_fence_fb_obj(dev, fbo);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj)
- drm_gem_object_unreference_unlocked(ifb->obj);
+ drm_gem_object_unreference(ifb->obj);
return 0;
}
return 0;
}
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+ {
+ .callback = intel_no_modeset_on_lid_dmi_callback,
+ .ident = "Toshiba Tecra A11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
/*
* Lid events. Note the use of 'modeset_on_lid':
* - we set it on lid close, and reset it on open
*/
if (connector)
connector->status = connector->funcs->detect(connector);
+ /* Don't force modeset on machines where it causes a GPU lockup */
+ if (dmi_check_system(intel_no_modeset_on_lid))
+ return NOTIFY_OK;
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
unsigned long val,
struct cpufreq_freqs *freqs)
{
-#warning "it's not clear if this is right since the core CPU (N) clock has no effect on the memory (L) clock"
switch (val) {
case CPUFREQ_PRECHANGE:
if (freqs->new > freqs->old) {
"pre-updating\n",
freqs->new / 1000, (freqs->new / 100) % 10,
freqs->old / 1000, (freqs->old / 100) % 10);
- pxa2xx_pcmcia_set_mcxx(skt, freqs->new);
+ pxa2xx_pcmcia_set_timing(skt);
}
break;
"post-updating\n",
freqs->new / 1000, (freqs->new / 100) % 10,
freqs->old / 1000, (freqs->old / 100) % 10);
- pxa2xx_pcmcia_set_mcxx(skt, freqs->new);
+ pxa2xx_pcmcia_set_timing(skt);
}
break;
}
udc_disable(udc);
}
-#ifdef CONFIG_CPU_PXA27x
+#ifdef CONFIG_PXA27x
extern void pxa27x_clear_otgph(void);
#else
#define pxa27x_clear_otgph() do {} while (0)
__raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
}
-#ifdef CONFIG_CPU_PXA27x
+#ifdef CONFIG_PXA27x
extern void pxa27x_clear_otgph(void);
#else
#define pxa27x_clear_otgph() do {} while (0)