Diffstat (limited to 'target/linux/patches/3.4.112/nds32.patch')
-rw-r--r-- | target/linux/patches/3.4.112/nds32.patch | 72132
1 file changed, 72132 insertions, 0 deletions
diff --git a/target/linux/patches/3.4.112/nds32.patch b/target/linux/patches/3.4.112/nds32.patch
new file mode 100644
index 000000000..d0da6f7b3
--- /dev/null
+++ b/target/linux/patches/3.4.112/nds32.patch
@@ -0,0 +1,72132 @@
+diff -Nur linux-3.4.110.orig/arch/nds32/boot/install.sh linux-3.4.110/arch/nds32/boot/install.sh
+--- linux-3.4.110.orig/arch/nds32/boot/install.sh	1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4.110/arch/nds32/boot/install.sh	2016-04-07 10:20:50.862077930 +0200
+@@ -0,0 +1,47 @@
++#!/bin/sh
++#
++# arch/nds32/boot/install.sh
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (C) 1995 by Linus Torvalds
++# Copyright (C) 2009 Andes Technology Corporation
++#
++# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
++# Adapted from code in arch/i386/boot/install.sh by Russell King
++#
++# "make install" script for the nds32 architecture
++#
++# Arguments:
++#   $1 - kernel version
++#   $2 - kernel image file
++#   $3 - kernel map file
++#   $4 - default install path (blank if root directory)
++#
++
++# User may have a custom install script
++if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
++if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
++
++# Normal install
++echo "Installing normal kernel"
++base=vmlinux
++
++if [ -f $4/$base-$1 ]; then
++	mv $4/$base-$1 $4/$base-$1.old
++fi
++cat $2 > $4/$base-$1
++
++# Install system map file
++if [ -f $4/System.map-$1 ]; then
++	mv $4/System.map-$1 $4/System.map-$1.old
++fi
++cp $3 $4/System.map-$1
++
++if [ -x /sbin/loadmap ]; then
++	/sbin/loadmap
++else
++	echo "You have to install it yourself"
++fi
+diff -Nur linux-3.4.110.orig/arch/nds32/boot/Makefile linux-3.4.110/arch/nds32/boot/Makefile
+--- linux-3.4.110.orig/arch/nds32/boot/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4.110/arch/nds32/boot/Makefile	2016-04-07 10:20:50.862077930 +0200
+@@ -0,0 +1,22 @@
++#
++# arch/nds32/boot/Makefile
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (C) 1995-2002 Russell King
++# Copyright (C) 2009 Andes Technology Corporation
++#
++
++targets := Image
++
++$(obj)/Image: vmlinux FORCE
++	$(call if_changed,objcopy)
++	@echo '  Kernel: $@ is ready'
++
++.PHONY: FORCE
++install: $(obj)/Image
++	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
++	$(obj)/Image System.map "$(INSTALL_PATH)"
++
+diff -Nur linux-3.4.110.orig/arch/nds32/common/dmabounce.c linux-3.4.110/arch/nds32/common/dmabounce.c
+--- linux-3.4.110.orig/arch/nds32/common/dmabounce.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-3.4.110/arch/nds32/common/dmabounce.c	2016-04-07 10:20:50.882078703 +0200
+@@ -0,0 +1,672 @@
++/*
++ * arch/nds32/common/dmabounce.c
++ *
++ * Special dma_{map/unmap/dma_sync}_* routines for systems that have
++ * limited DMA windows.  These functions utilize bounce buffers to
++ * copy data to/from buffers located outside the DMA region.  This
++ * only works for systems in which DMA memory is at the bottom of
++ * RAM, the remainder of memory is at the top, and the DMA memory
++ * can be marked as ZONE_DMA.  Anything beyond that, such as
++ * discontiguous DMA windows, will require custom implementations
++ * that reserve memory areas at early bootup.
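++ *
++ * For example (numbers illustrative, not tied to any real board): on
++ * a machine where only the first 64 MiB of RAM is DMA-capable, a
++ * driver buffer at physical address 0x05000000 (80 MiB) lies outside
++ * the window, so map_single() below copies it into a bounce buffer
++ * allocated from ZONE_DMA and hands that buffer's bus address to the
++ * device instead.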
++ *
++ * Original version by Brad Parker (brad@heeltoe.com)
++ * Re-written by Christopher Hoover <ch@murgatroid.com>
++ * Made generic by Deepak Saxena <dsaxena@plexity.net>
++ *
++ * Copyright (C) 2002 Hewlett Packard Company.
++ * Copyright (C) 2004 MontaVista Software, Inc.
++ * Copyright (C) 2009 Andes Technology Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * version 2 as published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/list.h>
++
++#undef DEBUG
++
++#undef STATS
++#ifdef STATS
++#define DO_STATS(X) do { X ; } while (0)
++#else
++#define DO_STATS(X) do { } while (0)
++#endif
++
++/* ************************************************** */
++
++struct safe_buffer {
++	struct list_head node;
++
++	/* original request */
++	void *ptr;
++	size_t size;
++	int direction;
++
++	/* safe buffer info */
++	struct dma_pool *pool;
++	void *safe;
++	dma_addr_t safe_dma_addr;
++};
++
++struct dmabounce_device_info {
++	struct list_head node;
++
++	struct device *dev;
++	struct dma_pool *small_buffer_pool;
++	struct dma_pool *large_buffer_pool;
++	struct list_head safe_buffers;
++	unsigned long small_buffer_size, large_buffer_size;
++#ifdef STATS
++	unsigned long sbp_allocs;
++	unsigned long lbp_allocs;
++	unsigned long total_allocs;
++	unsigned long map_op_count;
++	unsigned long bounce_count;
++#endif
++};
++
++static LIST_HEAD(dmabounce_devs);
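++
++/*
++ * Bounce buffers come from one of two dma_pools, chosen by size in
++ * alloc_safe_buffer() below; anything larger than large_buffer_size
++ * falls back to dma_alloc_coherent().  A typical registration might
++ * look like (sizes here are illustrative, not mandated by this code):
++ *
++ *	dmabounce_register_dev(dev, 512, 4096);
++ *
++ * after which a 256-byte mapping is served from the small pool, a
++ * 2 KiB mapping from the large pool, and a 64 KiB mapping from the
++ * coherent allocator.
++ */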
++
++#ifdef STATS
++static void print_alloc_stats(struct dmabounce_device_info *device_info)
++{
++	printk(KERN_INFO
++	       "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
++	       dev_name(device_info->dev),
++	       device_info->sbp_allocs, device_info->lbp_allocs,
++	       device_info->total_allocs - device_info->sbp_allocs -
++	       device_info->lbp_allocs, device_info->total_allocs);
++}
++#endif
++
++/* find the given device in the dmabounce device list */
++static inline struct dmabounce_device_info *find_dmabounce_dev(struct device *dev)
++{
++	struct list_head *entry;
++
++	list_for_each(entry, &dmabounce_devs) {
++		struct dmabounce_device_info *d =
++		    list_entry(entry, struct dmabounce_device_info, node);
++
++		if (d->dev == dev)
++			return d;
++	}
++	return NULL;
++}
++
++/* allocate a 'safe' buffer and keep track of it */
++static inline struct safe_buffer *alloc_safe_buffer(struct dmabounce_device_info *device_info,
++						    void *ptr, size_t size,
++						    enum dma_data_direction dir)
++{
++	struct safe_buffer *buf;
++	struct dma_pool *pool;
++	struct device *dev = device_info->dev;
++	void *safe;
++	dma_addr_t safe_dma_addr;
++
++	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", __func__, ptr, size, dir);
++
++	DO_STATS(device_info->total_allocs++);
++
++	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
++	if (buf == NULL) {
++		dev_warn(dev, "%s: kmalloc failed\n", __func__);
++		return NULL;
++	}
++
++	if (size <= device_info->small_buffer_size) {
++		pool = device_info->small_buffer_pool;
++		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
++
++		DO_STATS(device_info->sbp_allocs++);
++	} else if (size <= device_info->large_buffer_size) {
++		pool = device_info->large_buffer_pool;
++		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
++
++		DO_STATS(device_info->lbp_allocs++);
++	} else {
++		pool = NULL;
++		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
++	}
++
++	if (safe == NULL) {
++		dev_warn(device_info->dev,
++			 "%s: could not alloc dma memory (size=%d)\n",
++			 __func__, size);
++		kfree(buf);
++		return NULL;
++	}
++
++#ifdef STATS
++	if (device_info->total_allocs % 1000 == 0)
++		print_alloc_stats(device_info);
++#endif
++
++	buf->ptr = ptr;
++	buf->size = size;
++	buf->direction = dir;
++	buf->pool = pool;
++	buf->safe = safe;
++	buf->safe_dma_addr = safe_dma_addr;
++
++	list_add(&buf->node, &device_info->safe_buffers);
++
++	return buf;
++}
++
++/* determine if a buffer is from our "safe" pool */
++static inline struct safe_buffer *find_safe_buffer(struct dmabounce_device_info *device_info,
++						   dma_addr_t safe_dma_addr)
++{
++	struct list_head *entry;
++
++	list_for_each(entry, &device_info->safe_buffers) {
++		struct safe_buffer *b =
++		    list_entry(entry, struct safe_buffer, node);
++
++		if (b->safe_dma_addr == safe_dma_addr)
++			return b;
++	}
++
++	return NULL;
++}
++
++static inline void
++free_safe_buffer(struct dmabounce_device_info *device_info,
++		 struct safe_buffer *buf)
++{
++	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
++
++	list_del(&buf->node);
++
++	if (buf->pool)
++		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
++	else
++		dma_free_coherent(device_info->dev, buf->size, buf->safe,
++				  buf->safe_dma_addr);
++
++	kfree(buf);
++}
++
++/* ************************************************** */
++
++#ifdef STATS
++static void print_map_stats(struct dmabounce_device_info *device_info)
++{
++	printk(KERN_INFO
++	       "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
++	       dev_name(device_info->dev),
++	       device_info->map_op_count, device_info->bounce_count);
++}
++#endif
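++
++/*
++ * A worked example of the mask arithmetic in map_single() below
++ * (values illustrative): for a 24-bit DMA mask, mask = 0x00ffffff,
++ * so limit = (mask + 1) & ~mask = 0x01000000, i.e. 16 MiB is the
++ * largest mapping the device can address; needs_bounce is non-zero
++ * whenever the first or last byte of [dma_addr, dma_addr + size)
++ * carries bits above the mask, i.e. lies outside the DMA window.
++ */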
++
++static inline dma_addr_t
++map_single(struct device *dev, void *ptr, size_t size,
++	   enum dma_data_direction dir)
++{
++	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
++	dma_addr_t dma_addr;
++	int needs_bounce = 0;
++
++	if (device_info)
++		DO_STATS(device_info->map_op_count++);
++
++	dma_addr = virt_to_dma(dev, ptr);
++
++	if (dev->dma_mask) {
++		unsigned long mask = *dev->dma_mask;
++		unsigned long limit;
++
++		limit = (mask + 1) & ~mask;
++		if (limit && size > limit) {
++			dev_err(dev, "DMA mapping too big (requested %#x "
++				"mask %#Lx)\n", size, *dev->dma_mask);
++			return ~0;
++		}
++
++		/*
++		 * Figure out if we need to bounce from the DMA mask.
++		 */
++		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
++	}
++
++	if (device_info
++	    && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
++		struct safe_buffer *buf;
++
++		buf = alloc_safe_buffer(device_info, ptr, size, dir);
++		if (!buf) {
++			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
++				__func__, ptr);
++			return 0;
++		}
++
++		dev_dbg(dev,
++			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
++			__func__, buf->ptr, (void *)virt_to_dma(dev, buf->ptr),
++			buf->safe, (void *)buf->safe_dma_addr);
++
++		if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL)) {
++			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
++				__func__, ptr, buf->safe, size);
++			memcpy(buf->safe, ptr, size);
++		}
++		consistent_sync(buf->safe, size, dir);
++
++		dma_addr = buf->safe_dma_addr;
++	} else {
++		consistent_sync(ptr, size, dir);
++	}
++
++	return dma_addr;
++}
++
++static inline void
++unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++	     enum dma_data_direction dir)
++{
++	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
++	struct safe_buffer *buf = NULL;
++
++	/*
++	 * Trying to unmap an invalid mapping
++	 */
++	if (dma_addr == ~0) {
++		dev_err(dev, "Trying to unmap invalid mapping\n");
++		return;
++	}
++
++	if (device_info)
++		buf = find_safe_buffer(device_info, dma_addr);
++
++	if (buf) {
++		BUG_ON(buf->size != size);
++
++		dev_dbg(dev,
++			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
++			__func__, buf->ptr, (void *)virt_to_dma(dev, buf->ptr),
++			buf->safe, (void *)buf->safe_dma_addr);
++
++		DO_STATS(device_info->bounce_count++);
++
++		if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)) {
++			dev_dbg(dev,
++				"%s: copy back safe %p to unsafe %p size %d\n",
++				__func__, buf->safe, buf->ptr, size);
++			memcpy(buf->ptr, buf->safe, size);
++		}
++		free_safe_buffer(device_info, buf);
++	}
++}
++
++static inline void
++sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++	    enum dma_data_direction dir)
++{
++	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
++	struct safe_buffer *buf = NULL;
++
++	if (device_info)
++		buf = find_safe_buffer(device_info, dma_addr);
++
++	if (buf) {
++		/*
++		 * Both of these checks from original code need to be
++		 * commented out b/c some drivers rely on the following:
++		 *
++		 * 1) Drivers may map a large chunk of memory into DMA space
++		 *    but only sync a small portion of it.  Good example is
++		 *    allocating a large buffer, mapping it, and then
++		 *    breaking it up into small descriptors.  No point
++		 *    in syncing the whole buffer if you only have to
++		 *    touch one descriptor.
++		 *
++		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
++		 *    usually only synced in one dir at a time.
++		 *
++		 * See drivers/net/eepro100.c for examples of both cases.
++		 *
++		 * -ds
++		 *
++		 * BUG_ON(buf->size != size);
++		 * BUG_ON(buf->direction != dir);
++		 */
++
++		dev_dbg(dev,
++			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
++			__func__, buf->ptr, (void *)virt_to_dma(dev, buf->ptr),
++			buf->safe, (void *)buf->safe_dma_addr);
++
++		DO_STATS(device_info->bounce_count++);
++
++		switch (dir) {
++		case DMA_FROM_DEVICE:
++			dev_dbg(dev,
++				"%s: copy back safe %p to unsafe %p size %d\n",
++				__func__, buf->safe, buf->ptr, size);
++			memcpy(buf->ptr, buf->safe, size);
++			break;
++		case DMA_TO_DEVICE:
++			dev_dbg(dev,
++				"%s: copy out unsafe %p to safe %p, size %d\n",
++				__func__, buf->ptr, buf->safe, size);
++			memcpy(buf->safe, buf->ptr, size);
++			break;
++		case DMA_BIDIRECTIONAL:
++			BUG();	/* is this allowed?  what does it mean? */
++		default:
++			BUG();
++		}
++		consistent_sync(buf->safe, size, dir);
++	} else {
++		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
++	}
++}
++
++/* ************************************************** */
++
++/*
++ * see if a buffer address is in an 'unsafe' range.  if it is
++ * allocate a 'safe' buffer and copy the unsafe buffer into it.
++ * substitute the safe buffer for the unsafe one.
++ * (basically move the buffer from an unsafe area to a safe one)
++ */
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++	       enum dma_data_direction dir)
++{
++	unsigned long flags;
++	dma_addr_t dma_addr;
++
++	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", __func__, ptr, size, dir);
++
++	BUG_ON(dir == DMA_NONE);
++
++	local_irq_save(flags);
++
++	dma_addr = map_single(dev, ptr, size, dir);
++
++	local_irq_restore(flags);
++
++	return dma_addr;
++}
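++
++/*
++ * Typical driver-side sequence (sketch; "my_dev" and "buf" are
++ * placeholders, not symbols from this file):
++ *
++ *	dma_addr_t h = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
++ *	... point the hardware at h and start the transfer ...
++ *	dma_unmap_single(my_dev, h, len, DMA_TO_DEVICE);
++ *
++ * If buf needed bouncing, the copy into the safe buffer happened in
++ * dma_map_single() and any copy back happens in dma_unmap_single().
++ */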
++
++/*
++ * see if a mapped address was really a "safe" buffer and if so, copy
++ * the data from the safe buffer back to the unsafe buffer and free up
++ * the safe buffer.  (basically return things back to the way they
++ * should be)
++ */
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++		 enum dma_data_direction dir)
++{
++	unsigned long flags;
++
++	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
++		__func__, (void *)dma_addr, size, dir);
++
++	BUG_ON(dir == DMA_NONE);
++
++	local_irq_save(flags);
++
++	unmap_single(dev, dma_addr, size, dir);
++
++	local_irq_restore(flags);
++}
++
++int
++dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++	   enum dma_data_direction dir)
++{
++	unsigned long flags;
++	int i;
++
++	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir);
++
++	BUG_ON(dir == DMA_NONE);
++
++	local_irq_save(flags);
++
++	for (i = 0; i < nents; i++, sg++) {
++		/* struct scatterlist lost its 'page' member long before
++		 * 3.4; go through the sg_page() accessor instead. */
++		struct page *page = sg_page(sg);
++		unsigned int offset = sg->offset;
++		unsigned int length = sg->length;
++		void *ptr = page_address(page) + offset;
++
++		sg->dma_address = map_single(dev, ptr, length, dir);
++	}
++
++	local_irq_restore(flags);
++
++	return nents;
++}
++
++void
++dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++	     enum dma_data_direction dir)
++{
++	unsigned long flags;
++	int i;
++
++	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir);
++
++	BUG_ON(dir == DMA_NONE);
++
++	local_irq_save(flags);
++
++	for (i = 0; i < nents; i++, sg++) {
++		dma_addr_t dma_addr = sg->dma_address;
++		unsigned int length = sg->length;
++
++		unmap_single(dev, dma_addr, length, dir);
++	}
++
++	local_irq_restore(flags);
++}
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
++			enum dma_data_direction dir)
++{
++	unsigned long flags;
++
++	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
++		__func__, (void *)dma_addr, size, dir);
++
++	local_irq_save(flags);
++
++	sync_single(dev, dma_addr, size, dir);
++
++	local_irq_restore(flags);
++}
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
++			   size_t size, enum dma_data_direction dir)
++{
++	unsigned long flags;
++
++	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
++		__func__, (void *)dma_addr, size, dir);
++
++	local_irq_save(flags);
++
++	sync_single(dev, dma_addr, size, dir);
++
++	local_irq_restore(flags);
++}
++
++void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
++		    enum dma_data_direction dir)
++{
++	unsigned long flags;
++	int i;
++
++	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir);
++
++	BUG_ON(dir == DMA_NONE);
++
++	local_irq_save(flags);
++
++	for (i = 0; i < nents; i++, sg++) {
++		dma_addr_t dma_addr = sg->dma_address;
++		unsigned int length = sg->length;
++
++		sync_single(dev, dma_addr, length, dir);
++	}
++
++	local_irq_restore(flags);
++}
++
++void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
++		       enum dma_data_direction dir)
++{
++	unsigned long flags;
++	int i;
++
++	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", __func__, sg, nents, dir);
++
++	BUG_ON(dir == DMA_NONE);
++
++	local_irq_save(flags);
++
++	for (i = 0; i < nents; i++, sg++) {
++		dma_addr_t dma_addr = sg->dma_address;
++		unsigned int length = sg->length;
++
++		sync_single(dev, dma_addr, length, dir);
++	}
++
++	local_irq_restore(flags);
++}
"Could not allocated dmabounce_device_info for %s", ++ dev->bus_id); ++ return -ENOMEM; ++ } ++ ++ device_info->small_buffer_pool = ++ dma_pool_create("small_dmabounce_pool", ++ dev, small_buffer_size, 0 /* byte alignment */ , ++ 0 /* no page-crossing issues */ ); ++ if (!device_info->small_buffer_pool) { ++ printk(KERN_ERR ++ "dmabounce: could not allocate small DMA pool for %s\n", ++ dev->bus_id); ++ kfree(device_info); ++ return -ENOMEM; ++ } ++ ++ if (large_buffer_size) { ++ device_info->large_buffer_pool = ++ dma_pool_create("large_dmabounce_pool", ++ dev, ++ large_buffer_size, 0 /* byte alignment */ , ++ 0 /* no page-crossing issues */ ); ++ if (!device_info->large_buffer_pool) { ++ printk(KERN_ERR ++ "dmabounce: could not allocate large DMA pool for %s\n", ++ dev->bus_id); ++ dma_pool_destroy(device_info->small_buffer_pool); ++ ++ return -ENOMEM; ++ } ++ } ++ ++ device_info->dev = dev; ++ device_info->small_buffer_size = small_buffer_size; ++ device_info->large_buffer_size = large_buffer_size; ++ INIT_LIST_HEAD(&device_info->safe_buffers); ++ ++#ifdef STATS ++ device_info->sbp_allocs = 0; ++ device_info->lbp_allocs = 0; ++ device_info->total_allocs = 0; ++ device_info->map_op_count = 0; ++ device_info->bounce_count = 0; ++#endif ++ ++ list_add(&device_info->node, &dmabounce_devs); ++ ++ printk(KERN_INFO "dmabounce: registered device %s on %s bus\n", ++ dev->bus_id, dev->bus->name); ++ ++ return 0; ++} ++ ++void dmabounce_unregister_dev(struct device *dev) ++{ ++ struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); ++ ++ if (!device_info) { ++ printk(KERN_WARNING ++ "%s: Never registered with dmabounce but attempting" ++ "to unregister!\n", dev->bus_id); ++ return; ++ } ++ ++ if (!list_empty(&device_info->safe_buffers)) { ++ printk(KERN_ERR ++ "%s: Removing from dmabounce with pending buffers!\n", ++ dev->bus_id); ++ BUG(); ++ } ++ ++ if (device_info->small_buffer_pool) ++ dma_pool_destroy(device_info->small_buffer_pool); ++ if (device_info->large_buffer_pool) ++ dma_pool_destroy(device_info->large_buffer_pool); ++ ++#ifdef STATS ++ print_alloc_stats(device_info); ++ print_map_stats(device_info); ++#endif ++ ++ list_del(&device_info->node); ++ ++ kfree(device_info); ++ ++ printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n", ++ dev->bus_id, dev->bus->name); ++} ++ ++EXPORT_SYMBOL(dma_map_single); ++EXPORT_SYMBOL(dma_unmap_single); ++EXPORT_SYMBOL(dma_map_sg); ++EXPORT_SYMBOL(dma_unmap_sg); ++EXPORT_SYMBOL(dma_sync_single); ++EXPORT_SYMBOL(dma_sync_sg); ++EXPORT_SYMBOL(dmabounce_register_dev); ++EXPORT_SYMBOL(dmabounce_unregister_dev); ++ ++MODULE_AUTHOR ++ ("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>"); ++MODULE_DESCRIPTION ++ ("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows"); ++MODULE_LICENSE("GPL"); +diff -Nur linux-3.4.110.orig/arch/nds32/common/Makefile linux-3.4.110/arch/nds32/common/Makefile +--- linux-3.4.110.orig/arch/nds32/common/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/common/Makefile 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,6 @@ ++# ++# Makefile for the linux kernel. 
++# ++ ++obj-y += rtctime.o ++obj-$(CONFIG_DMABOUNCE) += dmabounce.o +diff -Nur linux-3.4.110.orig/arch/nds32/common/rtctime.c linux-3.4.110/arch/nds32/common/rtctime.c +--- linux-3.4.110.orig/arch/nds32/common/rtctime.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/common/rtctime.c 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,441 @@ ++/* ++ * linux/arch/nds32/common/rtctime.c ++ * ++ * Copyright (C) 2003 Deep Blue Solutions Ltd. ++ * Based on sa1100-rtc.c, Nils Faerber, CIH, Nicolas Pitre. ++ * Based on rtc.c by Paul Gortmaker ++ * Copyright (C) 2009 Andes Technology Corporation ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include <linux/module.h> ++#include <linux/kernel.h> ++#include <linux/time.h> ++#include <linux/rtc.h> ++#include <linux/poll.h> ++#include <linux/proc_fs.h> ++#include <linux/miscdevice.h> ++#include <linux/spinlock.h> ++#include <linux/capability.h> ++#include <linux/device.h> ++#include <linux/mutex.h> ++#include <linux/rtc.h> ++ ++#include <asm/rtc.h> ++#include <asm/semaphore.h> ++ ++static DECLARE_WAIT_QUEUE_HEAD(rtc_wait); ++static struct fasync_struct *rtc_async_queue; ++ ++/* ++ * rtc_lock protects rtc_irq_data ++ */ ++static DEFINE_SPINLOCK(rtc_lock); ++static unsigned long rtc_irq_data; ++ ++/* ++ * rtc_sem protects rtc_inuse and rtc_ops ++ */ ++static DEFINE_MUTEX(rtc_mutex); ++static unsigned long rtc_inuse; ++static struct rtc_ops *rtc_ops; ++ ++#define rtc_epoch 1900UL ++ ++/* ++ * Calculate the next alarm time given the requested alarm time mask ++ * and the current time. ++ */ ++void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, ++ struct rtc_time *alrm) ++{ ++ unsigned long next_time; ++ unsigned long now_time; ++ ++ next->tm_year = now->tm_year; ++ next->tm_mon = now->tm_mon; ++ next->tm_mday = now->tm_mday; ++ next->tm_hour = alrm->tm_hour; ++ next->tm_min = alrm->tm_min; ++ next->tm_sec = alrm->tm_sec; ++ ++ rtc_tm_to_time(now, &now_time); ++ rtc_tm_to_time(next, &next_time); ++ ++ if (next_time < now_time) { ++ /* Advance one day */ ++ next_time += 60 * 60 * 24; ++ rtc_time_to_tm(next_time, next); ++ } ++} ++ ++static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm) ++{ ++ memset(tm, 0, sizeof(struct rtc_time)); ++ return ops->read_time(tm); ++} ++ ++static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm) ++{ ++ int ret; ++ ++ ret = rtc_valid_tm(tm); ++ if (ret == 0) ++ ret = ops->set_time(tm); ++ ++ return ret; ++} ++ ++static inline int rtc_arm_read_alarm(struct rtc_ops *ops, ++ struct rtc_wkalrm *alrm) ++{ ++ int ret = -EINVAL; ++ if (ops->read_alarm) { ++ memset(alrm, 0, sizeof(struct rtc_wkalrm)); ++ ret = ops->read_alarm(alrm); ++ } ++ return ret; ++} ++ ++static inline int rtc_arm_set_alarm(struct rtc_ops *ops, ++ struct rtc_wkalrm *alrm) ++{ ++ int ret = -EINVAL; ++ if (ops->set_alarm) ++ ret = ops->set_alarm(alrm); ++ return ret; ++} ++ ++void rtc_update(unsigned long num, unsigned long events) ++{ ++ spin_lock(&rtc_lock); ++ rtc_irq_data = (rtc_irq_data + (num << 8)) | events; ++ spin_unlock(&rtc_lock); ++ ++ wake_up_interruptible(&rtc_wait); ++ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); ++} ++ ++EXPORT_SYMBOL(rtc_update); ++ ++static ssize_t ++rtc_read(struct file *file, char __user * buf, size_t count, loff_t * ppos) ++{ ++ DECLARE_WAITQUEUE(wait, current); ++ unsigned long data; ++ 
++
++/*
++ * Calculate the next alarm time given the requested alarm time mask
++ * and the current time.
++ */
++void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
++			 struct rtc_time *alrm)
++{
++	unsigned long next_time;
++	unsigned long now_time;
++
++	next->tm_year = now->tm_year;
++	next->tm_mon = now->tm_mon;
++	next->tm_mday = now->tm_mday;
++	next->tm_hour = alrm->tm_hour;
++	next->tm_min = alrm->tm_min;
++	next->tm_sec = alrm->tm_sec;
++
++	rtc_tm_to_time(now, &now_time);
++	rtc_tm_to_time(next, &next_time);
++
++	if (next_time < now_time) {
++		/* Advance one day */
++		next_time += 60 * 60 * 24;
++		rtc_time_to_tm(next_time, next);
++	}
++}
++
++static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm)
++{
++	memset(tm, 0, sizeof(struct rtc_time));
++	return ops->read_time(tm);
++}
++
++static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm)
++{
++	int ret;
++
++	ret = rtc_valid_tm(tm);
++	if (ret == 0)
++		ret = ops->set_time(tm);
++
++	return ret;
++}
++
++static inline int rtc_arm_read_alarm(struct rtc_ops *ops,
++				     struct rtc_wkalrm *alrm)
++{
++	int ret = -EINVAL;
++
++	if (ops->read_alarm) {
++		memset(alrm, 0, sizeof(struct rtc_wkalrm));
++		ret = ops->read_alarm(alrm);
++	}
++	return ret;
++}
++
++static inline int rtc_arm_set_alarm(struct rtc_ops *ops,
++				    struct rtc_wkalrm *alrm)
++{
++	int ret = -EINVAL;
++
++	if (ops->set_alarm)
++		ret = ops->set_alarm(alrm);
++	return ret;
++}
++
++void rtc_update(unsigned long num, unsigned long events)
++{
++	spin_lock(&rtc_lock);
++	rtc_irq_data = (rtc_irq_data + (num << 8)) | events;
++	spin_unlock(&rtc_lock);
++
++	wake_up_interruptible(&rtc_wait);
++	kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
++}
++
++EXPORT_SYMBOL(rtc_update);
++
++static ssize_t
++rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
++{
++	DECLARE_WAITQUEUE(wait, current);
++	unsigned long data;
++	ssize_t ret;
++
++	if (count < sizeof(unsigned long))
++		return -EINVAL;
++
++	add_wait_queue(&rtc_wait, &wait);
++	do {
++		__set_current_state(TASK_INTERRUPTIBLE);
++
++		spin_lock_irq(&rtc_lock);
++		data = rtc_irq_data;
++		rtc_irq_data = 0;
++		spin_unlock_irq(&rtc_lock);
++
++		if (data != 0) {
++			ret = 0;
++			break;
++		}
++		if (file->f_flags & O_NONBLOCK) {
++			ret = -EAGAIN;
++			break;
++		}
++		if (signal_pending(current)) {
++			ret = -ERESTARTSYS;
++			break;
++		}
++		schedule();
++	} while (1);
++	set_current_state(TASK_RUNNING);
++	remove_wait_queue(&rtc_wait, &wait);
++
++	if (ret == 0) {
++		ret = put_user(data, (unsigned long __user *)buf);
++		if (ret == 0)
++			ret = sizeof(unsigned long);
++	}
++	return ret;
++}
++
++static unsigned int rtc_poll(struct file *file, poll_table *wait)
++{
++	unsigned long data;
++
++	poll_wait(file, &rtc_wait, wait);
++
++	spin_lock_irq(&rtc_lock);
++	data = rtc_irq_data;
++	spin_unlock_irq(&rtc_lock);
++
++	return data != 0 ? POLLIN | POLLRDNORM : 0;
++}
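++
++/*
++ * Userspace reaches these handlers through the /dev/rtc character
++ * device registered below; a minimal read of the current time looks
++ * like (sketch, error handling omitted):
++ *
++ *	int fd = open("/dev/rtc", O_RDONLY);
++ *	struct rtc_time tm;
++ *	ioctl(fd, RTC_RD_TIME, &tm);
++ *	close(fd);
++ */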
++
++/* 3.4 no longer has file_operations.ioctl; use the unlocked_ioctl
++ * prototype (the inode argument was unused anyway). */
++static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	struct rtc_ops *ops = file->private_data;
++	struct rtc_time tm;
++	struct rtc_wkalrm alrm;
++	void __user *uarg = (void __user *)arg;
++	int ret = -EINVAL;
++
++	switch (cmd) {
++	case RTC_ALM_READ:
++		ret = rtc_arm_read_alarm(ops, &alrm);
++		if (ret)
++			break;
++		ret = copy_to_user(uarg, &alrm.time, sizeof(tm));
++		if (ret)
++			ret = -EFAULT;
++		break;
++
++	case RTC_ALM_SET:
++		ret = copy_from_user(&alrm.time, uarg, sizeof(tm));
++		if (ret) {
++			ret = -EFAULT;
++			break;
++		}
++		alrm.enabled = 0;
++		alrm.pending = 0;
++		alrm.time.tm_mday = -1;
++		alrm.time.tm_mon = -1;
++		alrm.time.tm_year = -1;
++		alrm.time.tm_wday = -1;
++		alrm.time.tm_yday = -1;
++		alrm.time.tm_isdst = -1;
++		ret = rtc_arm_set_alarm(ops, &alrm);
++		break;
++
++	case RTC_RD_TIME:
++		ret = rtc_arm_read_time(ops, &tm);
++		if (ret)
++			break;
++		ret = copy_to_user(uarg, &tm, sizeof(tm));
++		if (ret)
++			ret = -EFAULT;
++		break;
++
++	case RTC_SET_TIME:
++		if (!capable(CAP_SYS_TIME)) {
++			ret = -EACCES;
++			break;
++		}
++		ret = copy_from_user(&tm, uarg, sizeof(tm));
++		if (ret) {
++			ret = -EFAULT;
++			break;
++		}
++		ret = rtc_arm_set_time(ops, &tm);
++		break;
++
++	case RTC_EPOCH_SET:
++#ifndef rtc_epoch
++		/*
++		 * There were no RTC clocks before 1900.
++		 */
++		if (arg < 1900) {
++			ret = -EINVAL;
++			break;
++		}
++		if (!capable(CAP_SYS_TIME)) {
++			ret = -EACCES;
++			break;
++		}
++		rtc_epoch = arg;
++		ret = 0;
++#endif
++		break;
++
++	case RTC_EPOCH_READ:
++		ret = put_user(rtc_epoch, (unsigned long __user *)uarg);
++		break;
++
++	case RTC_WKALM_SET:
++		ret = copy_from_user(&alrm, uarg, sizeof(alrm));
++		if (ret) {
++			ret = -EFAULT;
++			break;
++		}
++		ret = rtc_arm_set_alarm(ops, &alrm);
++		break;
++
++	case RTC_WKALM_RD:
++		ret = rtc_arm_read_alarm(ops, &alrm);
++		if (ret)
++			break;
++		ret = copy_to_user(uarg, &alrm, sizeof(alrm));
++		if (ret)
++			ret = -EFAULT;
++		break;
++
++	default:
++		if (ops->ioctl)
++			ret = ops->ioctl(cmd, arg);
++		break;
++	}
++	return ret;
++}
++
++static int rtc_open(struct inode *inode, struct file *file)
++{
++	int ret;
++
++	mutex_lock(&rtc_mutex);
++
++	if (rtc_inuse) {
++		ret = -EBUSY;
++	} else if (!rtc_ops || !try_module_get(rtc_ops->owner)) {
++		ret = -ENODEV;
++	} else {
++		file->private_data = rtc_ops;
++
++		ret = rtc_ops->open ? rtc_ops->open() : 0;
++		if (ret == 0) {
++			spin_lock_irq(&rtc_lock);
++			rtc_irq_data = 0;
++			spin_unlock_irq(&rtc_lock);
++
++			rtc_inuse = 1;
++		}
++	}
++	mutex_unlock(&rtc_mutex);
++
++	return ret;
++}
++
++static int rtc_release(struct inode *inode, struct file *file)
++{
++	struct rtc_ops *ops = file->private_data;
++
++	if (ops->release)
++		ops->release();
++
++	spin_lock_irq(&rtc_lock);
++	rtc_irq_data = 0;
++	spin_unlock_irq(&rtc_lock);
++
++	module_put(ops->owner);
++	rtc_inuse = 0;
++
++	return 0;
++}
++
++static int rtc_fasync(int fd, struct file *file, int on)
++{
++	return fasync_helper(fd, file, on, &rtc_async_queue);
++}
++
++static const struct file_operations rtc_fops = {
++	.owner		= THIS_MODULE,
++	.llseek		= no_llseek,
++	.read		= rtc_read,
++	.poll		= rtc_poll,
++	.unlocked_ioctl	= rtc_ioctl,
++	.open		= rtc_open,
++	.release	= rtc_release,
++	.fasync		= rtc_fasync,
++};
++
++static struct miscdevice rtc_miscdev = {
++	.minor	= RTC_MINOR,
++	.name	= "rtc",
++	.fops	= &rtc_fops,
++};
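++
++/*
++ * rtc_read_proc() below backs /proc/driver/rtc; its output resembles
++ * (values illustrative):
++ *
++ *	rtc_time	: 18:00:00
++ *	rtc_date	: 2016-04-07
++ *	rtc_epoch	: 1900
++ *	alrm_time	: 06:30:00
++ *	alrm_date	: ****-**-**
++ *	alrm_wakeup	: no
++ *	alrm_pending	: no
++ */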
"yes" : "no"); ++ } ++ ++ if (ops->proc) ++ p += ops->proc(p); ++ ++ return p - page; ++} ++ ++int register_rtc(struct rtc_ops *ops) ++{ ++ int ret = -EBUSY; ++ ++ mutex_lock(&rtc_mutex); ++ if (rtc_ops == NULL) { ++ rtc_ops = ops; ++ ++ ret = misc_register(&rtc_miscdev); ++ if (ret == 0) ++ create_proc_read_entry("driver/rtc", 0, NULL, ++ rtc_read_proc, ops); ++ } ++ mutex_unlock(&rtc_mutex); ++ ++ return ret; ++} ++ ++EXPORT_SYMBOL(register_rtc); ++ ++void unregister_rtc(struct rtc_ops *rtc) ++{ ++ mutex_lock(&rtc_mutex); ++ if (rtc == rtc_ops) { ++ remove_proc_entry("driver/rtc", NULL); ++ misc_deregister(&rtc_miscdev); ++ rtc_ops = NULL; ++ } ++ mutex_unlock(&rtc_mutex); ++} ++ ++EXPORT_SYMBOL(unregister_rtc); +diff -Nur linux-3.4.110.orig/arch/nds32/configs/orca_8k_defconfig linux-3.4.110/arch/nds32/configs/orca_8k_defconfig +--- linux-3.4.110.orig/arch/nds32/configs/orca_8k_defconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-3.4.110/arch/nds32/configs/orca_8k_defconfig 2016-04-07 10:20:50.882078703 +0200 +@@ -0,0 +1,132 @@ ++CONFIG_EXPERIMENTAL=y ++CONFIG_CROSS_COMPILE="nds32le-linux-" |