Pull microblaze updates from Michal Simek.
* 'next' of git://git.monstr.eu/linux-2.6-microblaze:
microblaze: Enable IRQ in arch_cpu_idle
microblaze: Fix uaccess_ok macro
microblaze: Add support for new cpu versions and target architecture
microblaze: Do not select OPT_LIB_ASM by default
microblaze: Fix initrd support
microblaze: Do not use r6 in head.S
microblaze: pci: Remove duplicated header
microblaze: Set the default irq_domain
microblaze: pci: Remove duplicated include from pci-common.c
The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond
to each /dev/mtdX character device. These may represent
physical/simulated flash devices, partitions on a flash
- device, or concatenated flash devices. They exist regardless
- of whether CONFIG_MTD_CHAR is actually enabled.
+ device, or concatenated flash devices.
What: /sys/class/mtd/mtdXro/
Date: April 2009
Contact: linux-mtd@lists.infradead.org
Description:
These directories provide the corresponding read-only device
- nodes for /sys/class/mtd/mtdX/ . They are only created
- (for the benefit of udev) if CONFIG_MTD_CHAR is enabled.
+ nodes for /sys/class/mtd/mtdX/ .
What: /sys/class/mtd/mtdX/dev
Date: April 2009
drivers/acpi/acpi_platform.c. This limitation applies only to platform
devices; SPI and I2C devices are created automatically as described below.
+DMA support
+~~~~~~~~~~~
+DMA controllers enumerated via ACPI should be registered in the system to
+provide generic access to their resources. For example, a driver that would
+like its channels to be accessible to slave devices via the generic API call
+dma_request_slave_channel() must register itself at the end of its probe
+function like this:
+
+ err = devm_acpi_dma_controller_register(dev, xlate_func, dw);
+ /* Handle the error if it's not a case of !CONFIG_ACPI */
+
+and implement a custom xlate function if needed (usually acpi_dma_simple_xlate()
+is enough), which converts the FixedDMA resource described by struct
+acpi_dma_spec into the corresponding DMA channel. A piece of code for that case
+could look like:
+
+ #ifdef CONFIG_ACPI
+ struct filter_args {
+ /* Provide necessary information for the filter_func */
+ ...
+ };
+
+ static bool filter_func(struct dma_chan *chan, void *param)
+ {
+ /* Choose the proper channel */
+ ...
+ }
+
+ static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma)
+ {
+ dma_cap_mask_t cap;
+ struct filter_args args;
+
+ /* Prepare arguments for filter_func */
+ ...
+ return dma_request_channel(cap, filter_func, &args);
+ }
+ #else
+ static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma)
+ {
+ return NULL;
+ }
+ #endif
+
+dma_request_slave_channel() will call xlate_func() for each registered DMA
+controller. In the xlate function the proper channel must be chosen based on
+information in struct acpi_dma_spec and the properties of the controller
+provided by struct acpi_dma.
+
+Clients must call dma_request_slave_channel() with the string parameter that
+corresponds to a specific FixedDMA resource. By default "tx" means the first
+entry of the FixedDMA resource array, "rx" means the second entry. The ASL
+fragment below shows the layout:
+
+ Device (I2C0)
+ {
+ ...
+ Method (_CRS, 0, NotSerialized)
+ {
+ Name (DBUF, ResourceTemplate ()
+ {
+ FixedDMA (0x0018, 0x0004, Width32bit, _Y48)
+ FixedDMA (0x0019, 0x0005, Width32bit, )
+ })
+ ...
+ }
+ }
+
+So, the FixedDMA with request line 0x0018 is "tx" and the next one is "rx" in
+this example.
+
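+For illustration, a slave (client) driver could then request both channels in
+its probe function roughly like this (a minimal sketch; "dev" stands for the
+slave device's struct device and error handling is reduced to a fallback):
+
+	struct dma_chan *tx_chan, *rx_chan;
+
+	/* Names map to the FixedDMA entries shown above */
+	tx_chan = dma_request_slave_channel(dev, "tx");
+	rx_chan = dma_request_slave_channel(dev, "rx");
+	if (!tx_chan || !rx_chan) {
+		/* fall back to PIO or defer probing */
+	}
+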
+In some cases the client unfortunately needs to call
+acpi_dma_request_slave_chan_by_index() directly and thereby choose the
+specific FixedDMA resource by its index.
+
SPI serial bus support
~~~~~~~~~~~~~~~~~~~~~~
Slave devices behind SPI bus have SpiSerialBus resource attached to them.
* Atmel Direct Memory Access Controller (DMA)
Required properties:
-- compatible: Should be "atmel,<chip>-dma"
-- reg: Should contain DMA registers location and length
-- interrupts: Should contain DMA interrupt
+- compatible: Should be "atmel,<chip>-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain DMA interrupt.
+- #dma-cells: Must be <2>, used to represent the number of integer cells in
+  the dmas property of client devices.
-Examples:
+Example:
-dma@ffffec00 {
+dma0: dma@ffffec00 {
compatible = "atmel,at91sam9g45-dma";
reg = <0xffffec00 0x200>;
interrupts = <21>;
+ #dma-cells = <2>;
+};
+
+DMA clients connected to the Atmel DMA controller must use the format
+described in the dma.txt file, using a three-cell specifier for each channel:
+a phandle plus two integer cells.
+The three cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. The memory interface (16 most significant bits), the peripheral interface
+(16 least significant bits).
+3. The peripheral identifier for the hardware handshaking interface. The
+identifier can be different for tx and rx.
+
+Example:
+
+i2c0: i2c@f8010000 {
+ compatible = "atmel,at91sam9x5-i2c";
+ reg = <0xf8010000 0x100>;
+ interrupts = <9 4 6>;
+ dmas = <&dma0 1 7>,
+ <&dma0 1 8>;
+ dma-names = "tx", "rx";
};
nand-bus-width = <16>;
ti,nand-ecc-opt = "bch8";
- gpmc,sync-clk = <0>;
- gpmc,cs-on = <0>;
- gpmc,cs-rd-off = <44>;
- gpmc,cs-wr-off = <44>;
- gpmc,adv-on = <6>;
- gpmc,adv-rd-off = <34>;
- gpmc,adv-wr-off = <44>;
- gpmc,we-off = <40>;
- gpmc,oe-off = <54>;
- gpmc,access = <64>;
- gpmc,rd-cycle = <82>;
- gpmc,wr-cycle = <82>;
- gpmc,wr-access = <40>;
- gpmc,wr-data-mux-bus = <0>;
+ gpmc,sync-clk-ps = <0>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <44>;
+ gpmc,cs-wr-off-ns = <44>;
+ gpmc,adv-on-ns = <6>;
+ gpmc,adv-rd-off-ns = <34>;
+ gpmc,adv-wr-off-ns = <44>;
+ gpmc,we-off-ns = <40>;
+ gpmc,oe-off-ns = <54>;
+ gpmc,access-ns = <64>;
+ gpmc,rd-cycle-ns = <82>;
+ gpmc,wr-cycle-ns = <82>;
+ gpmc,wr-access-ns = <40>;
+ gpmc,wr-data-mux-bus-ns = <0>;
#address-cells = <1>;
#size-cells = <1>;
used for what purposes, but which don't use an on-flash partition table such
as RedBoot.
-#address-cells & #size-cells must both be present in the mtd device and be
-equal to 1.
+#address-cells & #size-cells must both be present in the mtd device. There are
+two valid values for both:
+<1>: for partitions that require a single 32-bit cell to represent their
+ size/address (aka the value is below 4 GiB)
+<2>: for partitions that require two 32-bit cells to represent their
+ size/address (aka the value is 4 GiB or greater).
Required properties:
- reg : The partition's offset and size within the mtd bank.
reg = <0x0100000 0x200000>;
};
};
+
+flash@1 {
+ #address-cells = <1>;
+ #size-cells = <2>;
+
+ /* a 4 GiB partition */
+ partition@0 {
+ label = "filesystem";
+ reg = <0x00000000 0x1 0x00000000>;
+ };
+};
+
+flash@2 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ /* an 8 GiB partition */
+ partition@0 {
+ label = "filesystem #1";
+ reg = <0x0 0x00000000 0x2 0x00000000>;
+ };
+
+ /* a 4 GiB partition */
+ partition@200000000 {
+ label = "filesystem #2";
+ reg = <0x2 0x00000000 0x1 0x00000000>;
+ };
+};
- bank-width: Address width of the device in bytes. GPMC supports 8-bit
and 16-bit devices and so must be either 1 or 2 bytes.
- compatible: Compatible string property for the ethernet child device.
-- gpmc,cs-on: Chip-select assertion time
-- gpmc,cs-rd-off: Chip-select de-assertion time for reads
-- gpmc,cs-wr-off: Chip-select de-assertion time for writes
-- gpmc,oe-on: Output-enable assertion time
-- gpmc,oe-off Output-enable de-assertion time
-- gpmc,we-on: Write-enable assertion time
-- gpmc,we-off: Write-enable de-assertion time
-- gpmc,access: Start cycle to first data capture (read access)
-- gpmc,rd-cycle: Total read cycle time
-- gpmc,wr-cycle: Total write cycle time
+- gpmc,cs-on-ns: Chip-select assertion time
+- gpmc,cs-rd-off-ns: Chip-select de-assertion time for reads
+- gpmc,cs-wr-off-ns: Chip-select de-assertion time for writes
+- gpmc,oe-on-ns: Output-enable assertion time
+- gpmc,oe-off-ns: Output-enable de-assertion time
+- gpmc,we-on-ns: Write-enable assertion time
+- gpmc,we-off-ns: Write-enable de-assertion time
+- gpmc,access-ns: Start cycle to first data capture (read access)
+- gpmc,rd-cycle-ns: Total read cycle time
+- gpmc,wr-cycle-ns: Total write cycle time
- reg: Chip-select, base address (relative to chip-select)
and size of the memory mapped for the device.
Note that base address will be typically 0 as this
bank-width = <2>;
gpmc,mux-add-data;
- gpmc,cs-on = <0>;
- gpmc,cs-rd-off = <186>;
- gpmc,cs-wr-off = <186>;
- gpmc,adv-on = <12>;
- gpmc,adv-rd-off = <48>;
- gpmc,adv-wr-off = <48>;
- gpmc,oe-on = <54>;
- gpmc,oe-off = <168>;
- gpmc,we-on = <54>;
- gpmc,we-off = <168>;
- gpmc,rd-cycle = <186>;
- gpmc,wr-cycle = <186>;
- gpmc,access = <114>;
- gpmc,page-burst-access = <6>;
- gpmc,bus-turnaround = <12>;
- gpmc,cycle2cycle-delay = <18>;
- gpmc,wr-data-mux-bus = <90>;
- gpmc,wr-access = <186>;
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <186>;
+ gpmc,cs-wr-off-ns = <186>;
+ gpmc,adv-on-ns = <12>;
+ gpmc,adv-rd-off-ns = <48>;
+ gpmc,adv-wr-off-ns = <48>;
+ gpmc,oe-on-ns = <54>;
+ gpmc,oe-off-ns = <168>;
+ gpmc,we-on-ns = <54>;
+ gpmc,we-off-ns = <168>;
+ gpmc,rd-cycle-ns = <186>;
+ gpmc,wr-cycle-ns = <186>;
+ gpmc,access-ns = <114>;
+ gpmc,page-burst-access-ns = <6>;
+ gpmc,bus-turnaround-ns = <12>;
+ gpmc,cycle2cycle-delay-ns = <18>;
+ gpmc,wr-data-mux-bus-ns = <90>;
+ gpmc,wr-access-ns = <186>;
gpmc,cycle2cycle-samecsen;
gpmc,cycle2cycle-diffcsen;
--- /dev/null
+* Marvell Armada 370/XP thermal management
+
+Required properties:
+
+- compatible: Should be set to one of the following:
+ marvell,armada370-thermal
+ marvell,armadaxp-thermal
+
+- reg: Device's register space.
+ Two entries are expected, see the examples below.
+ The first one is required for the sensor register;
+ the second one is required for the control register
+ to be used for sensor initialization (a.k.a. calibration).
+
+Example:
+
+ thermal@d0018300 {
+ compatible = "marvell,armada370-thermal";
+ reg = <0xd0018300 0x4
+ 0xd0018304 0x4>;
+ status = "okay";
+ };
--- /dev/null
+ DMA Test Guide
+ ==============
+
+ Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+This small document introduces how to test DMA drivers using the dmatest module.
+
+ Part 1 - How to build the test module
+
+The menuconfig contains an option that can be found under the following path:
+ Device Drivers -> DMA Engine support -> DMA Test client
+
+In the configuration file the option is called CONFIG_DMATEST. dmatest can be
+built as a module or into the kernel. Let's consider both cases.
+
+ Part 2 - When dmatest is built as a module...
+
+After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest
+folder will be created. It contains one node per module parameter, plus a
+'run' node that controls the run and stop phases of the test.
+
+Note that in this case the test will not run automatically on load.
+
+Example of usage:
+ % echo dma0chan0 > /sys/kernel/debug/dmatest/channel
+ % echo 2000 > /sys/kernel/debug/dmatest/timeout
+ % echo 1 > /sys/kernel/debug/dmatest/iterations
+ % echo 1 > /sys/kernel/debug/dmatest/run
+
+Hint: the list of available channels can be obtained by running the following
+command:
+ % ls -1 /sys/class/dma/
+
+After a while you will start to get messages about the current status or
+errors, as in the original code.
+
+Note that running a new test will stop any test in progress.
+
+The following command returns the current state of the test.
+ % cat /sys/kernel/debug/dmatest/run
+
+To wait for the test to finish, the user may perform a busy loop that checks the state.
+
+ % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ]
+ > do
+ > echo -n "."
+ > sleep 1
+ > done
+ > echo
+
+ Part 3 - When built-in in the kernel...
+
+The module parameters supplied on the kernel command line will be used for the
+first test performed. After the user gets control, the test can be interrupted
+or re-run with the same or different parameters. For details see the section
+above, "Part 2 - When dmatest is built as a module...".
+
+In both cases the module parameters are used as initial values for the test case.
+You can always check them at run time by running
+ % grep -H . /sys/module/dmatest/parameters/*
+
+ Part 4 - Gathering the test results
+
+The module keeps the test results in memory, so the gathered data can be
+inspected after the test is done.
+
+The special file 'results' in debugfs holds the data gathered for the test in
+progress. The messages collected are printed to the kernel log as well.
+
+Example of output:
+ % cat /sys/kernel/debug/dmatest/results
+ dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
+
+The message format is unified across the different types of errors. A number in
+parentheses represents additional information, e.g. an error code, an error
+counter, or a status.
+
+The result of the comparison between buffers is stored in a dedicated structure.
+
+Note that the verification result is now accessible only via the 'results' file
+in debugfs.
- BTRFS
- =====
+BTRFS
+=====
-Btrfs is a new copy on write filesystem for Linux aimed at
+Btrfs is a copy on write filesystem for Linux aimed at
implementing advanced features while focusing on fault tolerance,
repair and easy administration. Initially developed by Oracle, Btrfs
is licensed under the GPL and open for contribution from anyone.
* Online filesystem defragmentation
+Mount Options
+=============
- MAILING LIST
- ============
+When mounting a btrfs filesystem, the following options are accepted.
+Unless otherwise specified, all options default to off.
+
+ alloc_start=<bytes>
+ Debugging option to force all block allocations above a certain
+ byte threshold on each block device. The value is specified in
+ bytes, optionally with a K, M, or G suffix, case insensitive.
+ Default is 1MB.
+
+ autodefrag
+ Detect small random writes into files and queue them up for the
+ defrag process. Works best for small files; Not well suited for
+ large database workloads.
+
+ check_int
+ check_int_data
+ check_int_print_mask=<value>
+ These debugging options control the behavior of the integrity checking
+ module (the BTRFS_FS_CHECK_INTEGRITY config option required).
+
+ check_int enables the integrity checker module, which examines all
+ block write requests to ensure on-disk consistency, at a large
+ memory and CPU cost.
+
+ check_int_data includes extent data in the integrity checks, and
+ implies the check_int option.
+
+ check_int_print_mask takes a bitmask of BTRFSIC_PRINT_MASK_* values
+ as defined in fs/btrfs/check-integrity.c, to control the integrity
+ checker module behavior.
+
+ See comments at the top of fs/btrfs/check-integrity.c for more info.
+
+ compress
+ compress=<type>
+ compress-force
+ compress-force=<type>
+ Control BTRFS file data compression. Type may be specified as "zlib"
+ "lzo" or "no" (for no compression, used for remounting). If no type
+ is specified, zlib is used. If compress-force is specified,
+ all files will be compressed, whether or not they compress well.
+ If compression is enabled, nodatacow and nodatasum are disabled.
+
+ degraded
+ Allow mounts to continue with missing devices. A read-write mount may
+ fail with too many devices missing, for example if a stripe member
+ is completely missing.
+
+ device=<devicepath>
+ Specify a device during mount so that ioctls on the control device
+	can be avoided. Especially useful when trying to mount a multi-device
+ setup as root. May be specified multiple times for multiple devices.
+
+ discard
+ Issue frequent commands to let the block device reclaim space freed by
+ the filesystem. This is useful for SSD devices, thinly provisioned
+ LUNs and virtual machine images, but may have a significant
+ performance impact. (The fstrim command is also available to
+ initiate batch trims from userspace).
+
+ enospc_debug
+ Debugging option to be more verbose in some ENOSPC conditions.
+
+ fatal_errors=<action>
+ Action to take when encountering a fatal error:
+ "bug" - BUG() on a fatal error. This is the default.
+ "panic" - panic() on a fatal error.
+
+ flushoncommit
+ The 'flushoncommit' mount option forces any data dirtied by a write in a
+ prior transaction to commit as part of the current commit. This makes
+ the committed state a fully consistent view of the file system from the
+ application's perspective (i.e., it includes all completed file system
+ operations). This was previously the behavior only when a snapshot is
+ created.
+
+ inode_cache
+ Enable free inode number caching. Defaults to off due to an overflow
+ problem when the free space crcs don't fit inside a single page.
+
+ max_inline=<bytes>
+ Specify the maximum amount of space, in bytes, that can be inlined in
+ a metadata B-tree leaf. The value is specified in bytes, optionally
+ with a K, M, or G suffix, case insensitive. In practice, this value
+ is limited by the root sector size, with some space unavailable due
+ to leaf headers. For a 4k sectorsize, max inline data is ~3900 bytes.
+
+ metadata_ratio=<value>
+ Specify that 1 metadata chunk should be allocated after every <value>
+ data chunks. Off by default.
+
+ noacl
+ Disable support for Posix Access Control Lists (ACLs). See the
+ acl(5) manual page for more information about ACLs.
+
+ nobarrier
+ Disables the use of block layer write barriers. Write barriers ensure
+ that certain IOs make it through the device cache and are on persistent
+ storage. If used on a device with a volatile (non-battery-backed)
+ write-back cache, this option will lead to filesystem corruption on a
+ system crash or power loss.
+
+ nodatacow
+ Disable data copy-on-write for newly created files. Implies nodatasum,
+ and disables all compression.
+
+ nodatasum
+ Disable data checksumming for newly created files.
+
+ notreelog
+ Disable the tree logging used for fsync and O_SYNC writes.
+
+ recovery
+ Enable autorecovery attempts if a bad tree root is found at mount time.
+ Currently this scans a list of several previous tree roots and tries to
+ use the first readable.
+
+ skip_balance
+ Skip automatic resume of interrupted balance operation after mount.
+ May be resumed with "btrfs balance resume."
+
+ space_cache (*)
+ Enable the on-disk freespace cache.
+ nospace_cache
+ Disable freespace cache loading without clearing the cache.
+ clear_cache
+ Force clearing and rebuilding of the disk space cache if something
+ has gone wrong.
+
+ ssd
+ nossd
+ ssd_spread
+ Options to control ssd allocation schemes. By default, BTRFS will
+ enable or disable ssd allocation heuristics depending on whether a
+ rotational or nonrotational disk is in use. The ssd and nossd options
+ can override this autodetection.
+
+ The ssd_spread mount option attempts to allocate into big chunks
+ of unused space, and may perform better on low-end ssds. ssd_spread
+ implies ssd, enabling all other ssd heuristics as well.
+
+ subvol=<path>
+ Mount subvolume at <path> rather than the root subvolume. <path> is
+ relative to the top level subvolume.
+
+ subvolid=<ID>
+ Mount subvolume specified by an ID number rather than the root subvolume.
+ This allows mounting of subvolumes which are not in the root of the mounted
+ filesystem.
+ You can use "btrfs subvolume list" to see subvolume ID numbers.
+
+ subvolrootid=<objectid> (deprecated)
+ Mount subvolume specified by <objectid> rather than the root subvolume.
+ This allows mounting of subvolumes which are not in the root of the mounted
+ filesystem.
+ You can use "btrfs subvolume show " to see the object ID for a subvolume.
+
+ thread_pool=<number>
+ The number of worker threads to allocate. The default number is equal
+ to the number of CPUs + 2, or 8, whichever is smaller.
+
+ user_subvol_rm_allowed
+ Allow subvolumes to be deleted by a non-root user. Use with caution.
+
+MAILING LIST
+============
There is a Btrfs mailing list hosted on vger.kernel.org. You can
find details on how to subscribe here:
- IRC
- ===
+IRC
+===
Discussion of Btrfs also occurs on the #btrfs channel of the Freenode
IRC network.
not care how it's implemented.)
That said, if the convention is supported on their platform, drivers should
-use it when possible. Platforms must declare GENERIC_GPIO support in their
-Kconfig (boolean true), and provide an <asm/gpio.h> file. Drivers that can't
-work without standard GPIO calls should have Kconfig entries which depend
-on GENERIC_GPIO. The GPIO calls are available, either as "real code" or as
-optimized-away stubs, when drivers use the include file:
+use it when possible. Platforms must select ARCH_REQUIRE_GPIOLIB or
+ARCH_WANT_OPTIONAL_GPIOLIB in their Kconfig. Drivers that can't work without
+standard GPIO calls should have Kconfig entries which depend on GPIOLIB. The
+GPIO calls are available, either as "real code" or as optimized-away stubs,
+when drivers use the include file:
#include <linux/gpio.h>
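
For illustration, a minimal sketch of a consumer built on these calls (the
GPIO number and label below are made up for the example):

	#include <linux/gpio.h>

	#define MY_GPIO	42		/* hypothetical GPIO number */

	static int my_init(void)
	{
		int err;

		/* claim the line before using it */
		err = gpio_request(MY_GPIO, "my-signal");
		if (err)
			return err;

		err = gpio_direction_output(MY_GPIO, 0);
		if (err) {
			gpio_free(MY_GPIO);
			return err;
		}

		gpio_set_value(MY_GPIO, 1);	/* drive the line high */
		return 0;
	}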
manually with software code and TMU will read current temperature from user value not from
sensor's value.
-Enabling CONFIG_EXYNOS_THERMAL_EMUL option will make this support in available.
-When it's enabled, sysfs node will be created under
-/sys/bus/platform/devices/'exynos device name'/ with name of 'emulation'.
+Enabling CONFIG_THERMAL_EMULATION option will make this support available.
+When it's enabled, sysfs node will be created as
+/sys/devices/virtual/thermal/thermal_zone'zone id'/emul_temp.
-The sysfs node, 'emulation', will contain value 0 for the initial state. When you input any
+The sysfs node, 'emul_temp', will contain the value 0 in the initial state. When you input any
temperature you want into the sysfs node, it automatically enables emulation mode and
the current temperature will be changed to it.
(Exynos also supports a user-changeable delay time which would be used to delay of
1. thermal sysfs driver interface functions
1.1 thermal zone device interface
-1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name,
+1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *type,
int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops)
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+	int passive_delay, int polling_delay)
This interface function adds a new thermal zone device (sensor) to
/sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
thermal cooling devices registered at the same time.
- name: the thermal zone name.
+ type: the thermal zone type.
trips: the total number of trip points this thermal zone supports.
mask: Bit string: If 'n'th bit is set, then trip point 'n' is writeable.
devdata: device private data
will be fired.
.set_emul_temp: set the emulation temperature which helps in debugging
different threshold temperature points.
+ tzp: thermal zone platform parameters.
+ passive_delay: number of milliseconds to wait between polls when
+ performing passive cooling.
+ polling_delay: number of milliseconds to wait between polls when checking
+ whether trip points have been crossed (0 for interrupt driven systems).
+
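+For illustration, a minimal registration sketch (my_sensor_ops and
+my_sensor_data are hypothetical and assumed to be provided by the sensor
+driver; here one writeable trip point and a 1000 ms polling delay):
+
+	struct thermal_zone_device *tz;
+
+	tz = thermal_zone_device_register("my_sensor", 1, 0x1,
+					  my_sensor_data, &my_sensor_ops,
+					  NULL, 0, 1000);
+	if (IS_ERR(tz))
+		return PTR_ERR(tz);
+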
1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
Unit: millidegree Celsius
WO, Optional
+ WARNING: Be careful while enabling this option on production systems,
+ because userland can easily disable the thermal policy by simply
+ flooding this sysfs node with low temperature values.
+
*****************************
* Cooling device attributes *
*****************************
{thermal_zone, cooling_device, trip_point} combination. Returns NULL
if such an instance does not exist.
-5.3:notify_thermal_framework:
+5.3:thermal_notify_framework:
This function handles the trip events from sensor drivers. It starts
throttling the cooling devices according to the policy configured.
For CRITICAL and HOT trip points, this notifies the respective drivers,
This function serves as an arbitrator to set the state of a cooling
device. It sets the cooling device to the deepest cooling state if
possible.
-
-5.5:thermal_register_governor:
-This function lets the various thermal governors to register themselves
-with the Thermal framework. At run time, depending on a zone's platform
-data, a particular governor is used for throttling.
-
-5.6:thermal_unregister_governor:
-This function unregisters a governor from the thermal framework.
--- /dev/null
+MMUv3 initialization sequence.
+
+The code in the initialize_mmu macro sets up MMUv3 memory mapping
+identically to MMUv2 fixed memory mapping. Depending on
+CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is
+located in one of the following address ranges:
+
+ 0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout;
+ typically ROM)
+ 0x00000000..0x07FFFFFF (system RAM; this code is actually linked
+ at 0xD0000000..0xD7FFFFFF [cached]
+ or 0xD8000000..0xDFFFFFFF [uncached];
+ in any case, initially runs elsewhere
+ than linked, so have to be careful)
+
+The code has the following assumptions:
+ This code fragment is run only on an MMU v3.
+ TLBs are in their reset state.
+ ITLBCFG and DTLBCFG are zero (reset state).
+ RASID is 0x04030201 (reset state).
+ PS.RING is zero (reset state).
+ LITBASE is zero (reset state, PC-relative literals); required to be PIC.
+
+TLB setup proceeds along the following steps.
+
+ Legend:
+ VA = virtual address (two upper nibbles of it);
+ PA = physical address (two upper nibbles of it);
+ pc = physical range that contains this code;
+
+After step 2, we jump to a virtual address in 0x40000000..0x5fffffff
+that corresponds to the next instruction to execute in this code.
+After step 4, we jump to the intended (linked) address of this code.
+
+ Step 0 Step1 Step 2 Step3 Step 4 Step5
+ ============ ===== ============ ===== ============ =====
+ VA PA PA VA PA PA VA PA PA
+ ------ -- -- ------ -- -- ------ -- --
+ E0..FF -> E0 -> E0 E0..FF -> E0 F0..FF -> F0 -> F0
+ C0..DF -> C0 -> C0 C0..DF -> C0 E0..EF -> F0 -> F0
+ A0..BF -> A0 -> A0 A0..BF -> A0 D8..DF -> 00 -> 00
+ 80..9F -> 80 -> 80 80..9F -> 80 D0..D7 -> 00 -> 00
+ 60..7F -> 60 -> 60 60..7F -> 60
+ 40..5F -> 40 40..5F -> pc -> pc 40..5F -> pc
+ 20..3F -> 20 -> 20 20..3F -> 20
+ 00..1F -> 00 -> 00 00..1F -> 00
abstraction functions of the controller to implement it. (There is some optional code
that supports this implementation strategy, described later in this document, but
drivers acting as clients of the GPIO interface must not care how it is implemented.)
-That said, if the convention is supported on their platform, drivers should use it
-whenever possible. Platforms must declare support for GENERIC_GPIO (boolean true) in
-their Kconfig and provide an <asm/gpio.h> file. Drivers that call the standard GPIO
-functions should declare a dependency on GENERIC_GPIO in their Kconfig entry. When the driver includes the file:
+That said, if the convention is supported on their platform, drivers should use it
+whenever possible. Also, platforms must select the ARCH_REQUIRE_GPIOLIB or
+ARCH_WANT_OPTIONAL_GPIOLIB option in their Kconfig. Drivers that call the standard
+GPIO functions should declare a dependency on GENERIC_GPIO in their Kconfig entry.
+When the driver includes the file:
#include <linux/gpio.h>
THERMAL
M: Zhang Rui <rui.zhang@intel.com>
+M: Eduardo Valentin <eduardo.valentin@ti.com>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+Q: https://patchwork.kernel.org/project/linux-pm/list/
S: Supported
F: drivers/thermal/
F: include/linux/thermal.h
+F: include/linux/cpu_cooling.h
THINGM BLINK(1) USB RGB LED DRIVER
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
bool
default y
-config GENERIC_GPIO
- bool
-
config ZONE_DMA
bool
default y
select GENERIC_FIND_FIRST_BIT
# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
- select GENERIC_KERNEL_EXECVE
- select GENERIC_KERNEL_THREAD
select GENERIC_PENDING_IRQ if SMP
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
config GENERIC_HWEIGHT
def_bool y
-config BINFMT_ELF
- def_bool y
-
config STACKTRACE_SUPPORT
def_bool y
select STACKTRACE
menu "ARC Platform/SoC/Board"
source "arch/arc/plat-arcfpga/Kconfig"
+source "arch/arc/plat-tb10x/Kconfig"
#New platform adds here
endmenu
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_HAS_COH_LLSC
- def_bool n
-
config ARC_HAS_COH_RTSC
def_bool n
based on actual usage of FPU by a task. Thus our implemn does
this for all tasks in system.
+config ARC_CANT_LLSC
+ def_bool n
+
menuconfig ARC_CPU_REL_4_10
bool "Enable support for Rel 4.10 features"
default n
config ARC_HAS_LLSC
bool "Insn: LLOCK/SCOND (efficient atomic ops)"
default y
- depends on ARC_CPU_770
- # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
- depends on !SMP || ARC_HAS_COH_LLSC
+ depends on ARC_CPU_770 && !ARC_CANT_LLSC
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
-config CMDLINE
- string "Kernel command line to built-in"
- default "print-fatal-signals=1"
- help
- The default command line which will be appended to the optional
- u-boot provided command line (see below)
-
config CMDLINE_UBOOT
bool "Support U-boot kernel command line passing"
default n
command line from the U-boot environment to the Linux kernel then
switch this option on.
ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
- to it. kernel startup code will copy the string into cmdline buffer
- and also append CONFIG_CMDLINE.
+	  to it. The kernel startup code will append this to the cmdline
+	  args provided via the DeviceTree /bootargs property.
config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
source "kernel/Kconfig.preempt"
+menu "Executable file formats"
+source "fs/Kconfig.binfmt"
+endmenu
+
endmenu # "ARC Architecture Configuration"
source "mm/Kconfig"
UTS_MACHINE := arc
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE := arc-elf32-
+endif
+
KBUILD_DEFCONFIG := fpga_defconfig
cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
core-y += arch/arc/boot/dts/
core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/
+core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/
drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
libs-y += arch/arc/lib/ $(LIBGCC)
+boot := arch/arc/boot
+
#default target for make without any arguments.
-KBUILD_IMAGE := bootpImage
+KBUILD_IMAGE := bootpImage
all: $(KBUILD_IMAGE)
-boot := arch/arc/boot
-
bootpImage: vmlinux
-uImage: vmlinux
+boot_targets += uImage uImage.bin uImage.gz
+
+$(boot_targets): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
%.dtb %.dtb.S %.dtb.o: scripts
# uImage build relies on mkimage being available on your host for ARC target
# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
# and make sure it's reachable from your PATH
-MKIMAGE := $(srctree)/scripts/mkuboot.sh
OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
-UIMAGE_COMPRESSION = gzip
+
+suffix-y := bin
+suffix-$(CONFIG_KERNEL_GZIP) := gz
+
+targets += uImage uImage.bin uImage.gz
+extra-y += vmlinux.bin vmlinux.bin.gz
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
- $(call if_changed,uimage)
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,uimage,none)
+
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage,gzip)
-PHONY += FORCE
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
+ @ln -sf $(notdir $<) $@
+ @echo ' Image $@ is ready'
obj-y += $(builtindtb-y).dtb.o
targets += $(builtindtb-y).dtb
+.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
+
dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
-clean-files := *.dtb
+clean-files := *.dtb *.dtb.S
--- /dev/null
+/*
+ * Abilis Systems TB100 SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/include/ "abilis_tb10x.dtsi"
+
+/* interrupt specifiers
+ * --------------------
+ * 0: rising, 1: low, 2: high, 3: falling,
+ */
+
+/ {
+ clock-frequency = <500000000>; /* 500 MHZ */
+
+ soc100 {
+ bus-frequency = <166666666>;
+
+ pll0: oscillator {
+ clock-frequency = <1000000000>;
+ };
+ cpu_clk: clkdiv_cpu {
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+ ahb_clk: clkdiv_ahb {
+ clock-mult = <1>;
+ clock-div = <6>;
+ };
+
+ iomux: iomux@FF10601c {
+ /* Port 1 */
+ pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
+ pingrp = "mis0_pins";
+ };
+ pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
+ pingrp = "mis1_pins";
+ };
+ pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
+ pingrp = "gpioa_pins";
+ };
+ pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
+ pingrp = "mip1_pins";
+ };
+ /* Port 2 */
+ pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
+ pingrp = "mis2_pins";
+ };
+ pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
+ pingrp = "mis3_pins";
+ };
+ pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
+ pingrp = "gpioc_pins";
+ };
+ pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
+ pingrp = "mip3_pins";
+ };
+ /* Port 3 */
+ pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
+ pingrp = "mis4_pins";
+ };
+ pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
+ pingrp = "mis5_pins";
+ };
+ pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
+ pingrp = "gpioe_pins";
+ };
+ pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
+ pingrp = "mip5_pins";
+ };
+ /* Port 4 */
+ pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
+ pingrp = "mis6_pins";
+ };
+ pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
+ pingrp = "mis7_pins";
+ };
+ pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
+ pingrp = "gpiog_pins";
+ };
+ pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
+ pingrp = "mip7_pins";
+ };
+ /* Port 5 */
+ pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
+ pingrp = "gpioj_pins";
+ };
+ pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
+ pingrp = "gpiok_pins";
+ };
+ pctl_ciplus: pctl-ciplus { /* CI+ interface */
+ pingrp = "ciplus_pins";
+ };
+ pctl_mcard: pctl-mcard { /* M-Card interface */
+ pingrp = "mcard_pins";
+ };
+ /* Port 6 */
+ pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
+ pingrp = "mop_pins";
+ };
+ pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
+ pingrp = "mos0_pins";
+ };
+ pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
+ pingrp = "mos1_pins";
+ };
+ pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
+ pingrp = "mos2_pins";
+ };
+ pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
+ pingrp = "mos3_pins";
+ };
+ /* Port 7 */
+ pctl_uart0: pctl-uart0 { /* UART 0 */
+ pingrp = "uart0_pins";
+ };
+ pctl_uart1: pctl-uart1 { /* UART 1 */
+ pingrp = "uart1_pins";
+ };
+ pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
+ pingrp = "gpiol_pins";
+ };
+ pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
+ pingrp = "gpiom_pins";
+ };
+ /* Port 8 */
+ pctl_spi3: pctl-spi3 {
+ pingrp = "spi3_pins";
+ };
+ /* Port 9 */
+ pctl_spi1: pctl-spi1 {
+ pingrp = "spi1_pins";
+ };
+ pctl_gpio_n: pctl-gpio-n {
+ pingrp = "gpion_pins";
+ };
+ /* Unmuxed GPIOs */
+ pctl_gpio_b: pctl-gpio-b {
+ pingrp = "gpiob_pins";
+ };
+ pctl_gpio_d: pctl-gpio-d {
+ pingrp = "gpiod_pins";
+ };
+ pctl_gpio_f: pctl-gpio-f {
+ pingrp = "gpiof_pins";
+ };
+ pctl_gpio_h: pctl-gpio-h {
+ pingrp = "gpioh_pins";
+ };
+ pctl_gpio_i: pctl-gpio-i {
+ pingrp = "gpioi_pins";
+ };
+ };
+
+ gpioa: gpio@FF140000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF140000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <0>;
+ gpio-pins = <&pctl_gpio_a>;
+ };
+ gpiob: gpio@FF141000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF141000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <3>;
+ gpio-pins = <&pctl_gpio_b>;
+ };
+ gpioc: gpio@FF142000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF142000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <5>;
+ gpio-pins = <&pctl_gpio_c>;
+ };
+ gpiod: gpio@FF143000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF143000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <8>;
+ gpio-pins = <&pctl_gpio_d>;
+ };
+ gpioe: gpio@FF144000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF144000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <10>;
+ gpio-pins = <&pctl_gpio_e>;
+ };
+ gpiof: gpio@FF145000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF145000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <13>;
+ gpio-pins = <&pctl_gpio_f>;
+ };
+ gpiog: gpio@FF146000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF146000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <15>;
+ gpio-pins = <&pctl_gpio_g>;
+ };
+ gpioh: gpio@FF147000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF147000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <18>;
+ gpio-pins = <&pctl_gpio_h>;
+ };
+ gpioi: gpio@FF148000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF148000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <20>;
+ gpio-pins = <&pctl_gpio_i>;
+ };
+ gpioj: gpio@FF149000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF149000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <32>;
+ gpio-pins = <&pctl_gpio_j>;
+ };
+ gpiok: gpio@FF14a000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14A000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <64>;
+ gpio-pins = <&pctl_gpio_k>;
+ };
+ gpiol: gpio@FF14b000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14B000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <86>;
+ gpio-pins = <&pctl_gpio_l>;
+ };
+ gpiom: gpio@FF14c000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14C000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <90>;
+ gpio-pins = <&pctl_gpio_m>;
+ };
+ gpion: gpio@FF14d000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14D000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <94>;
+ gpio-pins = <&pctl_gpio_n>;
+ };
+ };
+};
--- /dev/null
+/*
+ * Abilis Systems TB100 Development Kit PCB device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/dts-v1/;
+
+/include/ "abilis_tb100.dtsi"
+
+/ {
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8";
+ };
+
+ aliases { };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x08000000>; /* 128M */
+ };
+
+ soc100 {
+ uart@FF100000 {
+ pinctrl-names = "abilis,simple-default";
+ pinctrl-0 = <&pctl_uart0>;
+ };
+ ethernet@FE100000 {
+ phy-mode = "rgmii";
+ };
+
+ i2c0: i2c@FF120000 {
+ sda-hold-time = <432>;
+ };
+ i2c1: i2c@FF121000 {
+ sda-hold-time = <432>;
+ };
+ i2c2: i2c@FF122000 {
+ sda-hold-time = <432>;
+ };
+ i2c3: i2c@FF123000 {
+ sda-hold-time = <432>;
+ };
+ i2c4: i2c@FF124000 {
+ sda-hold-time = <432>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ power {
+ label = "Power";
+ gpios = <&gpioi 0>;
+ linux,default-trigger = "default-on";
+ };
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&gpioi 1>;
+ linux,default-trigger = "heartbeat";
+ };
+ led2 {
+ label = "LED2";
+ gpios = <&gpioi 2>;
+ default-state = "off";
+ };
+ led3 {
+ label = "LED3";
+ gpios = <&gpioi 3>;
+ default-state = "off";
+ };
+ led4 {
+ label = "LED4";
+ gpios = <&gpioi 4>;
+ default-state = "off";
+ };
+ led5 {
+ label = "LED5";
+ gpios = <&gpioi 5>;
+ default-state = "off";
+ };
+ led6 {
+ label = "LED6";
+ gpios = <&gpioi 6>;
+ default-state = "off";
+ };
+ led7 {
+ label = "LED7";
+ gpios = <&gpioi 7>;
+ default-state = "off";
+ };
+ led8 {
+ label = "LED8";
+ gpios = <&gpioi 8>;
+ default-state = "off";
+ };
+ led9 {
+ label = "LED9";
+ gpios = <&gpioi 9>;
+ default-state = "off";
+ };
+ led10 {
+ label = "LED10";
+ gpios = <&gpioi 10>;
+ default-state = "off";
+ };
+ led11 {
+ label = "LED11";
+ gpios = <&gpioi 11>;
+ default-state = "off";
+ };
+ };
+ };
+};
--- /dev/null
+/*
+ * Abilis Systems TB101 SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/include/ "abilis_tb10x.dtsi"
+
+/* interrupt specifiers
+ * --------------------
+ * 0: rising, 1: low, 2: high, 3: falling,
+ */
+
+/ {
+ clock-frequency = <500000000>; /* 500 MHZ */
+
+ soc100 {
+ bus-frequency = <166666666>;
+
+ pll0: oscillator {
+ clock-frequency = <1000000000>;
+ };
+ cpu_clk: clkdiv_cpu {
+ clock-mult = <1>;
+ clock-div = <2>;
+ };
+ ahb_clk: clkdiv_ahb {
+ clock-mult = <1>;
+ clock-div = <6>;
+ };
+
+ iomux: iomux@FF10601c {
+ /* Port 1 */
+ pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
+ pingrp = "mis0_pins";
+ };
+ pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */
+ pingrp = "mis1_pins";
+ };
+ pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */
+ pingrp = "gpioa_pins";
+ };
+ pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */
+ pingrp = "mip1_pins";
+ };
+ /* Port 2 */
+ pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */
+ pingrp = "mis2_pins";
+ };
+ pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */
+ pingrp = "mis3_pins";
+ };
+ pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */
+ pingrp = "gpioc_pins";
+ };
+ pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */
+ pingrp = "mip3_pins";
+ };
+ /* Port 3 */
+ pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */
+ pingrp = "mis4_pins";
+ };
+ pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */
+ pingrp = "mis5_pins";
+ };
+ pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */
+ pingrp = "gpioe_pins";
+ };
+ pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */
+ pingrp = "mip5_pins";
+ };
+ /* Port 4 */
+ pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */
+ pingrp = "mis6_pins";
+ };
+ pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */
+ pingrp = "mis7_pins";
+ };
+ pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */
+ pingrp = "gpiog_pins";
+ };
+ pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */
+ pingrp = "mip7_pins";
+ };
+ /* Port 5 */
+ pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */
+ pingrp = "gpioj_pins";
+ };
+ pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */
+ pingrp = "gpiok_pins";
+ };
+ pctl_ciplus: pctl-ciplus { /* CI+ interface */
+ pingrp = "ciplus_pins";
+ };
+ pctl_mcard: pctl-mcard { /* M-Card interface */
+ pingrp = "mcard_pins";
+ };
+ pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */
+ pingrp = "stc0_pins";
+ };
+ pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */
+ pingrp = "stc1_pins";
+ };
+ /* Port 6 */
+ pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */
+ pingrp = "mop_pins";
+ };
+ pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
+ pingrp = "mos0_pins";
+ };
+ pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
+ pingrp = "mos1_pins";
+ };
+ pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
+ pingrp = "mos2_pins";
+ };
+ pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
+ pingrp = "mos3_pins";
+ };
+ /* Port 7 */
+ pctl_uart0: pctl-uart0 { /* UART 0 */
+ pingrp = "uart0_pins";
+ };
+ pctl_uart1: pctl-uart1 { /* UART 1 */
+ pingrp = "uart1_pins";
+ };
+ pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */
+ pingrp = "gpiol_pins";
+ };
+ pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */
+ pingrp = "gpiom_pins";
+ };
+ /* Port 8 */
+ pctl_spi3: pctl-spi3 {
+ pingrp = "spi3_pins";
+ };
+ pctl_jtag: pctl-jtag {
+ pingrp = "jtag_pins";
+ };
+ /* Port 9 */
+ pctl_spi1: pctl-spi1 {
+ pingrp = "spi1_pins";
+ };
+ pctl_gpio_n: pctl-gpio-n {
+ pingrp = "gpion_pins";
+ };
+ /* Unmuxed GPIOs */
+ pctl_gpio_b: pctl-gpio-b {
+ pingrp = "gpiob_pins";
+ };
+ pctl_gpio_d: pctl-gpio-d {
+ pingrp = "gpiod_pins";
+ };
+ pctl_gpio_f: pctl-gpio-f {
+ pingrp = "gpiof_pins";
+ };
+ pctl_gpio_h: pctl-gpio-h {
+ pingrp = "gpioh_pins";
+ };
+ pctl_gpio_i: pctl-gpio-i {
+ pingrp = "gpioi_pins";
+ };
+ };
+
+ gpioa: gpio@FF140000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF140000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <0>;
+ gpio-pins = <&pctl_gpio_a>;
+ };
+ gpiob: gpio@FF141000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF141000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <3>;
+ gpio-pins = <&pctl_gpio_b>;
+ };
+ gpioc: gpio@FF142000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF142000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <5>;
+ gpio-pins = <&pctl_gpio_c>;
+ };
+ gpiod: gpio@FF143000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF143000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <8>;
+ gpio-pins = <&pctl_gpio_d>;
+ };
+ gpioe: gpio@FF144000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF144000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <10>;
+ gpio-pins = <&pctl_gpio_e>;
+ };
+ gpiof: gpio@FF145000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF145000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <13>;
+ gpio-pins = <&pctl_gpio_f>;
+ };
+ gpiog: gpio@FF146000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF146000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <15>;
+ gpio-pins = <&pctl_gpio_g>;
+ };
+ gpioh: gpio@FF147000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF147000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <18>;
+ gpio-pins = <&pctl_gpio_h>;
+ };
+ gpioi: gpio@FF148000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF148000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <20>;
+ gpio-pins = <&pctl_gpio_i>;
+ };
+ gpioj: gpio@FF149000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF149000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <32>;
+ gpio-pins = <&pctl_gpio_j>;
+ };
+ gpiok: gpio@FF14a000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14A000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <64>;
+ gpio-pins = <&pctl_gpio_k>;
+ };
+ gpiol: gpio@FF14b000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14B000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <86>;
+ gpio-pins = <&pctl_gpio_l>;
+ };
+ gpiom: gpio@FF14c000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14C000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <90>;
+ gpio-pins = <&pctl_gpio_m>;
+ };
+ gpion: gpio@FF14d000 {
+ compatible = "abilis,tb10x-gpio";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <27 1>;
+ reg = <0xFF14D000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <1>;
+ gpio-base = <94>;
+ gpio-pins = <&pctl_gpio_n>;
+ };
+ };
+};
--- /dev/null
+/*
+ * Abilis Systems TB101 Development Kit PCB device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/dts-v1/;
+
+/include/ "abilis_tb101.dtsi"
+
+/ {
+ chosen {
+ bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8";
+ };
+
+ aliases { };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x08000000>; /* 128M */
+ };
+
+ soc100 {
+ uart@FF100000 {
+ pinctrl-names = "abilis,simple-default";
+ pinctrl-0 = <&pctl_uart0>;
+ };
+ ethernet@FE100000 {
+ phy-mode = "rgmii";
+ };
+
+ i2c0: i2c@FF120000 {
+ sda-hold-time = <432>;
+ };
+ i2c1: i2c@FF121000 {
+ sda-hold-time = <432>;
+ };
+ i2c2: i2c@FF122000 {
+ sda-hold-time = <432>;
+ };
+ i2c3: i2c@FF123000 {
+ sda-hold-time = <432>;
+ };
+ i2c4: i2c@FF124000 {
+ sda-hold-time = <432>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ power {
+ label = "Power";
+ gpios = <&gpioi 0>;
+ linux,default-trigger = "default-on";
+ };
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&gpioi 1>;
+ linux,default-trigger = "heartbeat";
+ };
+ led2 {
+ label = "LED2";
+ gpios = <&gpioi 2>;
+ default-state = "off";
+ };
+ led3 {
+ label = "LED3";
+ gpios = <&gpioi 3>;
+ default-state = "off";
+ };
+ led4 {
+ label = "LED4";
+ gpios = <&gpioi 4>;
+ default-state = "off";
+ };
+ led5 {
+ label = "LED5";
+ gpios = <&gpioi 5>;
+ default-state = "off";
+ };
+ led6 {
+ label = "LED6";
+ gpios = <&gpioi 6>;
+ default-state = "off";
+ };
+ led7 {
+ label = "LED7";
+ gpios = <&gpioi 7>;
+ default-state = "off";
+ };
+ led8 {
+ label = "LED8";
+ gpios = <&gpioi 8>;
+ default-state = "off";
+ };
+ led9 {
+ label = "LED9";
+ gpios = <&gpioi 9>;
+ default-state = "off";
+ };
+ led10 {
+ label = "LED10";
+ gpios = <&gpioi 10>;
+ default-state = "off";
+ };
+ led11 {
+ label = "LED11";
+ gpios = <&gpioi 11>;
+ default-state = "off";
+ };
+ };
+ };
+};
--- /dev/null
+/*
+ * Abilis Systems TB10X SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* interrupt specifiers
+ * --------------------
+ * 0: rising, 1: low, 2: high, 3: falling,
+ */
+
+/ {
+ compatible = "abilis,arc-tb10x";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,arc770d";
+ reg = <0>;
+ };
+ };
+
+ soc100 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ ranges = <0xfe000000 0xfe000000 0x02000000
+ 0x000F0000 0x000F0000 0x00010000>;
+ compatible = "abilis,tb10x", "simple-bus";
+
+ pll0: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-output-names = "pll0";
+ };
+ cpu_clk: clkdiv_cpu {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clocks = <&pll0>;
+ clock-output-names = "cpu_clk";
+ };
+ ahb_clk: clkdiv_ahb {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clocks = <&pll0>;
+ clock-output-names = "ahb_clk";
+ };
+
+ iomux: iomux@FF10601c {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "abilis,tb10x-iomux";
+ reg = <0xFF10601c 0x4>;
+ };
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ tb10x_ictl: pic@fe002000 {
+ compatible = "abilis,tb10x_ictl";
+ reg = <0xFE002000 0x20>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29 30 31>;
+ };
+
+ uart@FF100000 {
+ compatible = "snps,dw-apb-uart",
+ "abilis,simple-pinctrl";
+ reg = <0xFF100000 0x100>;
+ clock-frequency = <166666666>;
+ interrupts = <25 1>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupt-parent = <&tb10x_ictl>;
+ };
+ ethernet@FE100000 {
+ compatible = "snps,dwmac-3.70a","snps,dwmac";
+ reg = <0xFE100000 0x1058>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <6 1>;
+ interrupt-names = "macirq";
+ clocks = <&ahb_clk>;
+ clock-names = "stmmaceth";
+ };
+ dma@FE000000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xFE000000 0x400>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <14 1>;
+ dma-channels = <6>;
+ dma-requests = <0>;
+ dma-masters = <1>;
+ #dma-cells = <3>;
+ chan_allocation_order = <0>;
+ chan_priority = <1>;
+ block_size = <0x7ff>;
+ data_width = <2 0 0 0>;
+ clocks = <&ahb_clk>;
+ clock-names = "hclk";
+ };
+
+ i2c0: i2c@FF120000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xFF120000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 1>;
+ clocks = <&ahb_clk>;
+ };
+ i2c1: i2c@FF121000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xFF121000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 1>;
+ clocks = <&ahb_clk>;
+ };
+ i2c2: i2c@FF122000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xFF122000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 1>;
+ clocks = <&ahb_clk>;
+ };
+ i2c3: i2c@FF123000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xFF123000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 1>;
+ clocks = <&ahb_clk>;
+ };
+ i2c4: i2c@FF124000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xFF124000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <12 1>;
+ clocks = <&ahb_clk>;
+ };
+
+ spi0: spi@0xFE010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "abilis,tb100-spi";
+ num-cs = <1>;
+ reg = <0xFE010000 0x20>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <26 1>;
+ clocks = <&ahb_clk>;
+ };
+ spi1: spi@0xFE011000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "abilis,tb100-spi",
+ "abilis,simple-pinctrl";
+ num-cs = <2>;
+ reg = <0xFE011000 0x20>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <10 1>;
+ clocks = <&ahb_clk>;
+ };
+
+ tb10x_tsm: tb10x-tsm@ff316000 {
+ compatible = "abilis,tb100-tsm";
+ reg = <0xff316000 0x400>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <17 1>;
+ output-clkdiv = <4>;
+ global-packet-delay = <0x21>;
+ port-packet-delay = <0>;
+ };
+ tb10x_stream_proc: tb10x-stream-proc {
+ compatible = "abilis,tb100-streamproc";
+ reg = <0xfff00000 0x200>,
+ <0x000f0000 0x10000>,
+ <0xfff00200 0x105>,
+ <0xff10600c 0x1>,
+ <0xfe001018 0x1>;
+ reg-names = "mbox",
+ "sp_iccm",
+ "mbox_irq",
+ "cpuctrl",
+ "a6it_int_force";
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <20 1>, <19 1>;
+ interrupt-names = "cmd_irq", "event_irq";
+ };
+ tb10x_mdsc0: tb10x-mdscr@FF300000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xFF300000 0x7000>;
+ tb100-mdscr-manage-tsin;
+ };
+ tb10x_mscr0: tb10x-mdscr@FF307000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xFF307000 0x7000>;
+ };
+ tb10x_scr0: tb10x-mdscr@ff30e000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xFF30e000 0x4000>;
+ tb100-mdscr-manage-tsin;
+ };
+ tb10x_scr1: tb10x-mdscr@ff312000 {
+ compatible = "abilis,tb100-mdscr";
+ reg = <0xFF312000 0x4000>;
+ tb100-mdscr-manage-tsin;
+ };
+ tb10x_wfb: tb10x-wfb@ff319000 {
+ compatible = "abilis,tb100-wfb";
+ reg = <0xff319000 0x1000>;
+ interrupt-parent = <&tb10x_ictl>;
+ interrupts = <16 1>;
+ };
+ };
+};
--- /dev/null
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "snps,nsimosci";
+ clock-frequency = <80000000>; /* 80 MHz */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+ bootargs = "console=tty0 consoleblank=0";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256M */
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart0: serial@c0000000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xc0000000 0x2000>;
+ interrupts = <11>;
+ #clock-frequency = <80000000>;
+ clock-frequency = <3686400>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "okay";
+ };
+
+ pgu0: pgu@c9000000 {
+ compatible = "snps,arcpgufb";
+ reg = <0xc9000000 0x400>;
+ };
+
+ ps2: ps2@c9001000 {
+ compatible = "snps,arc_ps2";
+ reg = <0xc9000400 0x14>;
+ interrupts = <13>;
+ interrupt-names = "arc_ps2_irq";
+ };
+
+ eth0: ethernet@c0003000 {
+ compatible = "snps,oscilan";
+ reg = <0xc0003000 0x44>;
+ interrupts = <7>, <8>;
+ interrupt-names = "rx", "tx";
+ };
+ };
+};
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
CONFIG_ARC_BOARD_ML509=y
# CONFIG_ARC_HAS_RTSC is not set
CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
--- /dev/null
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_IDE is not set
+# CONFIG_ARCTANGENT_EMAC is not set
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
--- /dev/null
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="tb10x"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio"
+CONFIG_INITRAMFS_ROOT_UID=2100
+CONFIG_INITRAMFS_ROOT_GID=501
+# CONFIG_RD_GZIP is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_ARC_PLAT_TB10X=y
+CONFIG_ARC_CACHE_LINE_SHIFT=5
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_STACK_NONEXEC=y
+CONFIG_HZ=250
+CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_DEBUG_FS=y
+CONFIG_STMMAC_DA=y
+CONFIG_STMMAC_CHAINED=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_NET_DMA=y
+CONFIG_ASYNC_TX_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
#include <linux/mm.h>
+/*
+ * Semantically we need this because icache doesn't snoop dcache/dma.
+ * However ARC Cache flush requires paddr as well as vaddr, the latter not available
+ * in the flush_icache_page() API. So we no-op it but do the equivalent work
+ * in update_mmu_cache()
+ */
+#define flush_icache_page(vma, page)
+
void flush_cache_all(void);
void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
- int len);
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
+void __flush_dcache_page(unsigned long paddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
do { \
memcpy(dst, src, len); \
if (vma->vm_flags & VM_EXEC) \
- flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+ __sync_icache_dcache((unsigned long)(dst), vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
#ifndef __ASM_ARC_IRQ_H
#define __ASM_ARC_IRQ_H
-#define NR_IRQS 32
+#define NR_CPU_IRQS 32 /* number of interrupt lines of ARC770 CPU */
+#define NR_IRQS 128 /* allow some CPU external IRQ handling */
/* Platform Independent IRQs */
#define TIMER0_IRQ 3
#define BASE_BAUD (arc_get_core_freq() / 16)
+/*
+ * This is definitely going to break early 8250 consoles on multi-platform
+ * images but hey, it won't add any code complexity for a debug feature of
+ * one broken driver.
+ */
+#ifdef CONFIG_ARC_PLAT_TB10X
+#undef BASE_BAUD
+#define BASE_BAUD (arc_get_core_freq() / 16 / 3)
+#endif
+
#endif /* _ASM_ARC_SERIAL_H */
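
The TB10x override above implies the UART block is clocked at a third of the core clock: by the usual 8250 convention BASE_BAUD is uartclk/16, so the divisor the early console programs is simply BASE_BAUD/baud. A minimal sketch of that relation (illustrative helper only, not part of the patch):

	/* Hedged sketch: the classic 16550 relation BASE_BAUD == uartclk / 16,
	 * hence divisor == BASE_BAUD / baud (rounded to nearest).
	 */
	static unsigned int early_8250_divisor(unsigned int base_baud,
					       unsigned int baud)
	{
		return (base_baud + baud / 2) / baud;	/* round to nearest */
	}
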
#ifndef __ASSEMBLY__
-#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+#define tlb_flush(tlb) \
+do { \
+ if (tlb->fullmm) \
+ flush_tlb_mm((tlb)->mm); \
+} while (0)
/*
* This pair is called at time of munmap/exit to flush cache and TLB entries
* for mappings being torn down.
* 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
* as we don't support aliasing configs in our VIPT D$.
- * 2) tlb-flush part - implemted via tlb_end_vma( ) can be NOP as well-
- * albiet for difft reasons - its better handled by moving to new ASID
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) flushes the TLB range
*
* Note, read http://lkml.org/lkml/2004/1/15/6
*/
#define tlb_start_vma(tlb, vma)
-#define tlb_end_vma(tlb, vma)
+
+#define tlb_end_vma(tlb, vma) \
+do { \
+ if (!tlb->fullmm) \
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
+} while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address)
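
For context, the mmu_gather hooks above are driven by the core VM during munmap/exit; the net effect of this hunk is that per-VMA teardown now flushes just that VMA's range in tlb_end_vma(), while tlb_flush() only does the full-mm flush for the tlb->fullmm (process exit/execve) case. A simplified sketch of the calling sequence, assuming the stock generic flow in mm/memory.c (not the literal code):

	/*
	 * Sketch of the generic teardown flow that invokes the hooks above:
	 *
	 *   tlb_gather_mmu(&tlb, mm, fullmm);
	 *   for each vma being unmapped {
	 *           tlb_start_vma(&tlb, vma);   // cache half: NOP for ARC VIPT D$
	 *           ... PTEs cleared, pages batched ...
	 *           tlb_end_vma(&tlb, vma);     // !fullmm: flush_tlb_range(vma, ...)
	 *   }
	 *   tlb_finish_mmu(&tlb, start, end);   // ends up calling tlb_flush():
	 *                                       //   fullmm: flush_tlb_mm(mm)
	 */
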
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
+#include <linux/ptrace.h>
#include <asm/hardirq.h>
#include <asm/page.h>
-#include <asm/ptrace.h>
int main(void)
{
#include <asm/clk.h>
-unsigned long core_freq = 800000000;
+unsigned long core_freq = 80000000;
/*
* As of now we default to device-tree provided clock
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <asm/disasm.h>
-#include <asm/uaccess.h>
#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
defined(CONFIG_KPROBES)
#ifdef CONFIG_ARC_MISALIGN_ACCESS
SAVE_CALLEE_SAVED_USER
mov r3, sp ; callee_regs
-#endif
bl do_misaligned_access
-#ifdef CONFIG_ARC_MISALIGN_ACCESS
- DISCARD_CALLEE_SAVED_USER
+ ; TBD: optimize - do this only if a callee reg was involved
+ ; either a dst of emulated LD/ST or src with address-writeback
+ RESTORE_CALLEE_SAVED_USER
+#else
+ bl do_misaligned_error
#endif
b ret_from_exception
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include "../../drivers/irqchip/irqchip.h"
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/mach_desc.h>
* -Disable all IRQs (on CPU side)
* -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/
-void __init arc_init_IRQ(void)
+void __cpuinit arc_init_IRQ(void)
{
int level_mask = 0;
static struct irq_domain *root_domain;
-void __init init_onchip_IRQ(void)
+static int __init
+init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
- struct device_node *intc = NULL;
+ if (parent)
+ panic("DeviceTree incore intc not a root irq controller\n");
- intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
- if(!intc)
- panic("DeviceTree Missing incore intc\n");
-
- root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+ root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
&arc_intc_domain_ops, NULL);
if (!root_domain)
/* with this we don't need to export root_domain */
irq_set_default_host(root_domain);
+
+ return 0;
}
+IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
+
/*
* Late Interrupt system init called from start_kernel for Boot CPU only
*
*/
void __init init_IRQ(void)
{
- init_onchip_IRQ();
-
/* Any external intc can be setup here */
if (machine_desc->init_irq)
machine_desc->init_irq();
+ /* process the entire interrupt tree in one go */
+ irqchip_init();
+
#ifdef CONFIG_SMP
/* Master CPU can initialize it's side of IPI */
if (machine_desc->init_smp)
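
The switch to IRQCHIP_DECLARE() above is what lets init_onchip_IRQ() drop out of init_IRQ(): the macro places an of_device_id entry into a dedicated linker table, and the new irqchip_init() call walks that table via of_irq_init(), invoking init_onchip_IRQ() for every matching "snps,arc700-intc" node and passing its parent node (hence the added root-controller check). Roughly what the helper macro in drivers/irqchip/irqchip.h of this era looks like (a sketch; the exact definition may differ slightly):

	#define IRQCHIP_DECLARE(name, compat, fn)                           \
		static const struct of_device_id irqchip_of_match_##name    \
		__used __section(__irqchip_of_table)                        \
		= { .compatible = compat, .data = fn }

of_irq_init() also orders the calls so that parent interrupt controllers come up before their children, which matters once cascaded controllers such as tb10x_ictl are handled through the same table.
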
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
}
}
#endif
- return 0;
+ return 0;
}
void module_arch_cleanup(struct module *mod)
mod->arch.unw_info = unw;
}
#endif
- return 0;
+ return 0;
}
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
+#include <linux/cache.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
#include <asm/tlb.h>
-#include <asm/cache.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
-#include <asm/arcregs.h>
#include <asm/prom.h>
#include <asm/unwind.h>
#include <asm/clk.h>
int running_on_hw = 1; /* vs. on ISS */
char __initdata command_line[COMMAND_LINE_SIZE];
-struct machine_desc *machine_desc __initdata;
+struct machine_desc *machine_desc __cpuinitdata;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
-void __init read_arc_build_cfg_regs(void)
+void __cpuinit read_arc_build_cfg_regs(void)
{
struct bcr_perip uncached_space;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
return buf;
}
-void __init arc_chk_ccms(void)
+void __cpuinit arc_chk_ccms(void)
{
#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
* hardware has dedicated regs which need to be saved/restored on ctx-sw
* (Single Precision uses core regs), thus kernel is kind of oblivious to it
*/
-void __init arc_chk_fpu(void)
+void __cpuinit arc_chk_fpu(void)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
* such as only for boot CPU etc
*/
-void __init setup_processor(void)
+void __cpuinit setup_processor(void)
{
char str[512];
int cpu_id = smp_processor_id();
void __init setup_arch(char **cmdline_p)
{
+ /* This also populates @boot_command_line from /bootargs */
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+
+ /* Append any u-boot provided cmdline */
#ifdef CONFIG_CMDLINE_UBOOT
- /* Make sure that a whitespace is inserted before */
- strlcat(command_line, " ", sizeof(command_line));
+ /* Add a whitespace separator between the 2 cmdlines */
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, command_line, COMMAND_LINE_SIZE);
#endif
- /*
- * Append .config cmdline to base command line, which might already
- * contain u-boot "bootargs" (handled by head.S, if so configured)
- */
- strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
/* Save unparsed command line copy for /proc/cmdline */
- strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
- *cmdline_p = command_line;
-
- machine_desc = setup_machine_fdt(__dtb_start);
- if (!machine_desc)
- panic("Embedded DT invalid\n");
+ *cmdline_p = boot_command_line;
/* To force early parsing of things like mem=xxx */
parse_early_param();
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
-#include <asm/ptrace.h>
+#include <linux/ptrace.h>
+#include <linux/kprobes.h>
+#include <linux/kgdb.h>
#include <asm/setup.h>
-#include <asm/kprobes.h>
#include <asm/unaligned.h>
-#include <asm/kgdb.h>
+#include <asm/kprobes.h>
void __init trap_init(void)
{
DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
#ifdef CONFIG_ARC_MISALIGN_ACCESS
/*
int do_misaligned_access(unsigned long cause, unsigned long address,
struct pt_regs *regs, struct callee_regs *cregs)
{
- if (misaligned_fixup(address, regs, cause, cregs) != 0) {
- siginfo_t info;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = (void __user *)address;
- return handle_exception(cause, "Misaligned Access", regs,
- &info);
- }
+ if (misaligned_fixup(address, regs, cause, cregs) != 0)
+ return do_misaligned_error(cause, address, regs);
+
return 0;
}
-
-#else
-DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
#endif
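
The new DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) line is what allows the open-coded siginfo setup above to be dropped: the macro generates a small handler that fills the siginfo and delegates to handle_exception(), which is exactly what the removed block did by hand. Roughly what the generated function looks like (a sketch reconstructed from the removed code; the macro's exact expansion may differ):

	/* Sketch only - modelled on the removed open-coded path above */
	int do_misaligned_error(unsigned long cause, unsigned long address,
				struct pt_regs *regs)
	{
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code  = BUS_ADRALN;
		info.si_addr  = (void __user *)address;

		return handle_exception(cause, "Misaligned Access", regs, &info);
	}
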
/*
char buf[512];
int n = 0, len = sizeof(buf);
- /* weird loop because pt_regs regs rev r12..r0, r25..r13 */
for (i = start_num; i < start_num + 13; i++) {
n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
i, (unsigned long)*reg_rev);
if (((i + 1) % 3) == 0)
n += scnprintf(buf + n, len - n, "\n");
+ /* because pt_regs has regs reversed: r12..r0, r25..r13 */
reg_rev--;
}
if (start_num != 0)
n += scnprintf(buf + n, len - n, "\n\n");
- pr_info("%s", buf);
+ /* To continue printing callee regs on same line as scratch regs */
+ if (start_num == 0)
+ pr_info("%s", buf);
+ else
+ pr_cont("%s\n", buf);
}
static void show_callee_regs(struct callee_regs *cregs)
dev_t dev = 0;
char *nm = buf;
+ /* can't use print_vma_addr() yet as it doesn't check for
+ * non-inclusive vma
+ */
+
vma = find_vma(current->active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
ino = inode->i_ino;
}
pr_info(" @off 0x%lx in [%s]\n"
- " VMA: 0x%08lx to 0x%08lx\n\n",
- address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
- } else
+ " VMA: 0x%08lx to 0x%08lx\n",
+ vma->vm_start < TASK_UNMAPPED_BASE ?
+ address : address - vma->vm_start,
+ nm, vma->vm_start, vma->vm_end);
+ } else {
pr_info(" @No matching VMA found\n");
+ }
}
static void show_ecr_verbose(struct pt_regs *regs)
unsigned long address;
cause_reg = current->thread.cause_code;
- pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+ pr_info("\n[ECR ]: 0x%08x => ", cause_reg);
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
- pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+ pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n",
(cause_code == 0x01) ? "Read From" :
((cause_code == 0x02) ? "Write to" : "EX"),
address, regs->ret);
if (current->thread.cause_code)
show_ecr_verbose(regs);
- pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
- pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+ pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n",
+ current->thread.fault_address,
+ (void *)regs->blink, (void *)regs->ret);
- show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+ if (user_mode(regs))
+ show_faulting_vma(regs->ret, buf); /* faulting code, not data */
- /* can't use print_vma_addr() yet as it doesn't check for
- * non-inclusive vma
- */
+ pr_info("[STAT32]: 0x%08lx", regs->status32);
+
+#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit : ""
+ if (!user_mode(regs))
+ pr_cont(" : %2s %2s %2s %2s %2s\n",
+ STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1),
+ STS_BIT(regs, E2), STS_BIT(regs, E1));
- /* print special regs */
- pr_info("status32: 0x%08lx\n", regs->status32);
- pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
- pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
- regs->bta, regs->blink);
+ pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
+ regs->bta, regs->sp, regs->fp);
pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
regs->lp_start, regs->lp_end, regs->lp_count);
#include <asm/cachectl.h>
#include <asm/setup.h>
-
-#ifdef CONFIG_ARC_HAS_ICACHE
-static void __ic_line_inv_no_alias(unsigned long, int);
-static void __ic_line_inv_2_alias(unsigned long, int);
-static void __ic_line_inv_4_alias(unsigned long, int);
-
-/* Holds the ptr to flush routine, dependign on size due to aliasing issues */
-static void (*___flush_icache_rtn) (unsigned long, int);
-#endif
-
char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
* the cpuinfo structure for later use.
* No Validation done here, simply read/convert the BCRs
*/
-void __init read_decode_cache_bcr(void)
+void __cpuinit read_decode_cache_bcr(void)
{
struct bcr_cache ibcr, dbcr;
struct cpuinfo_arc_cache *p_ic, *p_dc;
* 3. Enable the Caches, setup default flush mode for D-Cache
* 3. Calculate the SHMLBA used by user space
*/
-void __init arc_cache_init(void)
+void __cpuinit arc_cache_init(void)
{
unsigned int temp;
unsigned int cpu = smp_processor_id();
}
#endif
-
- /*
- * if Cache way size is <= page size then no aliasing exhibited
- * otherwise ratio determines num of aliases.
- * e.g. 32K I$, 2 way set assoc, 8k pg size
- * way-sz = 32k/2 = 16k
- * way-pg-ratio = 16k/8k = 2, so 2 aliases possible
- * (meaning 1 line could be in 2 possible locations).
- */
- way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
- switch (way_pg_ratio) {
- case 0:
- case 1:
- ___flush_icache_rtn = __ic_line_inv_no_alias;
- break;
- case 2:
- ___flush_icache_rtn = __ic_line_inv_2_alias;
- break;
- case 4:
- ___flush_icache_rtn = __ic_line_inv_4_alias;
- break;
- default:
- panic("Unsupported I-Cache Sz\n");
- }
#endif
/* Enable/disable I-Cache */
/*
* I-Cache Aliasing in ARC700 VIPT caches
*
- * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
- * to "index" into SET of cache-line and paddr from MMU to match the TAG
- * in the WAYS of SET.
+ * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
+ * The orig Cache Management Module "CDU" only required paddr to invalidate a
+ * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
+ * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
+ * the exact same line.
*
- * However the CDU iterface (to flush/inv) lines from software, only takes
- * paddr (to have simpler hardware interface). For simpler cases, using paddr
- * alone suffices.
- * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
- * way_sz = cache_sz / num_ways = 16k/2 = 8k
- * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
- * Ignoring the bottom 5 bits corresp to the off within a 32b cacheline,
- * bits req for calc set-index = bits 12:5 (0 based). Since this range fits
- * inside the bottom 13 bits of paddr, which are same for vaddr and paddr
- * (with 8k pg sz), paddr alone can be safely used by CDU to unambigously
- * locate a cache-line.
- *
- * However for a difft sized cache, say 32k I$, above math yields need
- * for 14 bits of vaddr to locate a cache line, which can't be provided by
- * paddr, since the bit 13 (0 based) might differ between the two.
- *
- * This lack of extra bits needed for correct line addressing, defines the
- * classical problem of Cache aliasing with VIPT architectures
- * num_aliases = 1 << extra_bits
- * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
- * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
- * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
+ * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
+ * paddr alone could not be used to correctly index the cache.
*
* ------------------
* MMU v1/v2 (Fixed Page Size 8k)
* ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
- * would be bits [x:13], x would depend on cache-geom.
+ * would be bits [x:13], x would depend on cache-geometry, 13 comes from
+ * standard page size of 8k.
* H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits
* of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
* orig 5 bits of paddr were anyways ignored by CDU line ops, as they
* represent the offset within cache-line. The adv of using this "clumsy"
- * interface for additional info was no new reg was needed in CDU.
+ * interface for additional info was that no new reg was needed in the CDU
+ * programming model.
*
* 17:13 represented the max num of bits passable, actual bits needed were
* fewer, based on the num-of-aliases possible.
* -for 2 alias possibility, only bit 13 needed (32K cache)
* -for 4 alias possibility, bits 14:13 needed (64K cache)
*
- * Since vaddr was not available for all instances of I$ flush req by core
- * kernel, the only safe way (non-optimal though) was to kill all possible
- * lines which could represent an alias (even if they didnt represent one
- * in execution).
- * e.g. for 64K I$, 4 aliases possible, so we did
- * flush start
- * flush start | 0x01
- * flush start | 0x2
- * flush start | 0x3
- *
- * The penalty was invoking the operation itself, since tag match is anyways
- * paddr based, a line which didn't represent an alias would not match the
- * paddr, hence wont be killed
- *
- * Note that aliasing concerns are independent of line-sz for a given cache
- * geometry (size + set_assoc) because the extra bits required by line-sz are
- * reduced from the set calc.
- * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
- * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
- * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
- *
* ------------------
* MMU v3
* ------------------
- * This ver of MMU supports var page sizes (1k-16k) - Linux will support
- * 8k (default), 16k and 4k.
+ * This ver of MMU supports variable page sizes (1k-16k), although Linux will
+ * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
* meaning more vaddr bits needed to disambiguate the cache-line-op ;
* the existing scheme of piggybacking won't work for certain configurations.
*/
/***********************************************************
- * Machine specific helpers for per line I-Cache invalidate.
- * 3 routines to accpunt for 1, 2, 4 aliases possible
+ * Machine specific helper for per line I-Cache invalidate.
*/
-
-static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
-{
- while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
- write_aux_reg(ARC_REG_IC_PTAG, start);
-#endif
- write_aux_reg(ARC_REG_IC_IVIL, start);
- start += ARC_ICACHE_LINE_LEN;
- }
-}
-
-static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
-{
- while (num_lines-- > 0) {
-
-#if (CONFIG_ARC_MMU_VER > 2)
- /*
- * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
- * reg to pass the "tag" bits and existing IVIL reg only looks
- * at bits relevant for "index" (details above)
- * Programming Notes:
- * -when writing tag to PTAG reg, bit chopping can be avoided,
- * CDU ignores non-tag bits.
- * -Ideally "index" must be computed from vaddr, but it is not
- * avail in these rtns. So to be safe, we kill the lines in all
- * possible indexes corresp to num of aliases possible for
- * given cache config.
- */
- write_aux_reg(ARC_REG_IC_PTAG, start);
- write_aux_reg(ARC_REG_IC_IVIL,
- start & ~(0x1 << PAGE_SHIFT));
- write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
-#else
- write_aux_reg(ARC_REG_IC_IVIL, start);
- write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
-#endif
- start += ARC_ICACHE_LINE_LEN;
- }
-}
-
-static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
-{
- while (num_lines-- > 0) {
-
-#if (CONFIG_ARC_MMU_VER > 2)
- write_aux_reg(ARC_REG_IC_PTAG, start);
-
- write_aux_reg(ARC_REG_IC_IVIL,
- start & ~(0x3 << PAGE_SHIFT));
- write_aux_reg(ARC_REG_IC_IVIL,
- start & ~(0x2 << PAGE_SHIFT));
- write_aux_reg(ARC_REG_IC_IVIL,
- start & ~(0x1 << PAGE_SHIFT));
- write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
-#else
- write_aux_reg(ARC_REG_IC_IVIL, start);
- write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
- write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
- write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
-#endif
- start += ARC_ICACHE_LINE_LEN;
- }
-}
-
-static void __ic_line_inv(unsigned long start, unsigned long sz)
+static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+ unsigned long sz)
{
unsigned long flags;
int num_lines, slack;
+ unsigned int addr;
/*
- * Ensure we properly floor/ceil the non-line aligned/sized requests
- * and have @start - aligned to cache line, and integral @num_lines
+ * Ensure we properly floor/ceil the non-line aligned/sized requests:
* However page sized flushes can be compile time optimised.
- * -@start will be cache-line aligned already (being page aligned)
+ * -@phy_start will be cache-line aligned already (being page aligned)
* -@sz will be integral multiple of line size (being page sized).
*/
if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
- slack = start & ~ICACHE_LINE_MASK;
+ slack = phy_start & ~ICACHE_LINE_MASK;
sz += slack;
- start -= slack;
+ phy_start -= slack;
}
num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
- local_irq_save(flags);
- (*___flush_icache_rtn) (start, num_lines);
- local_irq_restore(flags);
-}
-
-/* Unlike routines above, having vaddr for flush op (along with paddr),
- * prevents the need to speculatively kill the lines in multiple sets
- * based on ratio of way_sz : pg_sz
- */
-static void __ic_line_inv_vaddr(unsigned long phy_start,
- unsigned long vaddr, unsigned long sz)
-{
- unsigned long flags;
- int num_lines, slack;
- unsigned int addr;
-
- slack = phy_start & ~ICACHE_LINE_MASK;
- sz += slack;
- phy_start -= slack;
- num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
-
#if (CONFIG_ARC_MMU_VER > 2)
vaddr &= ~ICACHE_LINE_MASK;
addr = phy_start;
write_aux_reg(ARC_REG_IC_IVIL, vaddr);
vaddr += ARC_ICACHE_LINE_LEN;
#else
- /* this paddr contains vaddrs bits as needed */
+ /* paddr contains stuffed vaddr bits */
write_aux_reg(ARC_REG_IC_IVIL, addr);
#endif
addr += ARC_ICACHE_LINE_LEN;
#else
-#define __ic_line_inv(start, sz)
#define __ic_line_inv_vaddr(pstart, vstart, sz)
#endif /* CONFIG_ARC_HAS_ICACHE */
* Exported APIs
*/
-/* TBD: use pg_arch_1 to optimize this */
void flush_dcache_page(struct page *page)
{
- __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+ /* Make a note that dcache is not yet flushed for this page */
+ set_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(dma_cache_wback);
/*
- * This is API for making I/D Caches consistent when modifying code
- * (loadable modules, kprobes, etc)
+ * This is API for making I/D Caches consistent when modifying
+ * kernel code (loadable modules, kprobes, kgdb...)
* This is called on insmod, with kernel virtual address for CODE of
* the module. ARC cache maintenance ops require PHY address thus we
* need to convert vmalloc addr to PHY addr
{
unsigned int tot_sz, off, sz;
unsigned long phy, pfn;
- unsigned long flags;
/* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */
/* Case: Kernel Phy addr (0x8000_0000 onwards) */
if (likely(kstart > PAGE_OFFSET)) {
- __ic_line_inv(kstart, kend - kstart);
- __dc_line_op(kstart, kend - kstart, OP_FLUSH);
+ /*
+ * The 2nd arg, despite being paddr, will be used to index icache
+ * This is OK since no alternate virtual mappings will exist
+ * given the callers for this case: kprobe/kgdb in built-in
+ * kernel code only.
+ */
+ __sync_icache_dcache(kstart, kstart, kend - kstart);
return;
}
pfn = vmalloc_to_pfn((void *)kstart);
phy = (pfn << PAGE_SHIFT) + off;
sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
- local_irq_save(flags);
- __dc_line_op(phy, sz, OP_FLUSH);
- __ic_line_inv(phy, sz);
- local_irq_restore(flags);
+ __sync_icache_dcache(phy, kstart, sz);
kstart += sz;
tot_sz -= sz;
}
}
/*
- * Optimised ver of flush_icache_range() with spec callers: ptrace/signals
- * where vaddr is also available. This allows passing both vaddr and paddr
- * bits to CDU for cache flush, short-circuting the current pessimistic algo
- * which kills all possible aliases.
- * An added adv of knowing that vaddr is user-vaddr avoids various checks
- * and handling for k-vaddr, k-paddr as done in orig ver above
+ * General purpose helper to make I and D cache lines consistent.
+ * @paddr is phy addr of region
+ * @vaddr is typically user or kernel vaddr (vmalloc)
+ * However in one instance, flush_icache_range() by kprobe (for a breakpt in
+ * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
+ * use a paddr to index the cache (despite VIPT). This is fine since a
+ * built-in kernel page will not have any virtual mappings (not even kernel).
+ * A kprobe on a loadable module is different as it will have a kvaddr.
*/
-void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
- int len)
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
- __ic_line_inv_vaddr(paddr, u_vaddr, len);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __ic_line_inv_vaddr(paddr, vaddr, len);
__dc_line_op(paddr, len, OP_FLUSH);
+ local_irq_restore(flags);
}
-/*
- * XXX: This also needs to be optim using pg_arch_1
- * This is called when a page-cache page is about to be mapped into a
- * user process' address space. It offers an opportunity for a
- * port to ensure d-cache/i-cache coherency if necessary.
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+/* wrapper to eliminate alignment checks in the flush loop at compile time */
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
- if (!(vma->vm_flags & VM_EXEC))
- return;
+ __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
+}
- __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+void __flush_dcache_page(unsigned long paddr)
+{
+ __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
}
void flush_icache_all(void)
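
To make the way-size vs page-size condition in the rewritten comment concrete: the number of possible aliases is simply the ratio of the per-way size to the page size. A small illustrative helper (numbers in the comments are the examples from the original comment, not a statement about any particular board; this is not part of the patch):

	/* Illustrative only - mirrors the geometry math discussed above */
	static inline unsigned int vipt_num_aliases(unsigned int cache_sz,
						    unsigned int num_ways,
						    unsigned int page_sz)
	{
		unsigned int way_sz = cache_sz / num_ways;     /* 32K/2  => 16K */

		return (way_sz > page_sz) ? way_sz / page_sz   /* 16K/8K => 2   */
					  : 1;                 /* no aliasing   */
	}

With 2 aliases only vaddr bit 13 is needed to pick the right set, which is why MMU v1/v2 stuffed bits [17:13] into the otherwise-ignored low paddr bits, while MMU v3 passes the paddr tag via IC_PTAG and the vaddr index through IVIL, as the code above shows.
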
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-long arc_copy_from_user_noinline(void *to, const void __user * from,
+long arc_copy_from_user_noinline(void *to, const void __user *from,
unsigned long n)
{
return __arc_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(arc_clear_user_noinline);
-long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
long count)
{
return __arc_strncpy_from_user(dst, src, count);
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
-#include <linux/version.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <asm/pgalloc.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
-#ifdef CONFIG_BLOCK_DEV_RAM
-#include <linux/blk.h>
-#endif
#include <linux/swap.h>
#include <linux/module.h>
#include <asm/page.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/cache.h>
+#include <linux/cache.h>
void __iomem *ioremap(unsigned long paddr, unsigned long size)
{
local_irq_restore(flags);
}
-/* arch hook called by core VM at the end of handle_mm_fault( ),
- * when a new PTE is entered in Page Tables or an existing one
- * is modified. We aggresively pre-install a TLB entry
+/*
+ * Called at the end of pagefault, for a userspace mapped page
+ * -pre-install the corresponding TLB entry into MMU
+ * -Finalize the delayed D-cache flush (wback+inv kernel mapping)
*/
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
+ unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+
+ create_tlb(vma, vaddr, ptep);
- create_tlb(vma, vaddress, ptep);
+ /* icache doesn't snoop dcache, thus needs to be made coherent here */
+ if (vma->vm_flags & VM_EXEC) {
+ struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+ /* if page was dcache dirty, flush now */
+ int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
+ if (dirty) {
+ unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+ __flush_dcache_page(paddr);
+ __inv_icache_page(paddr, vaddr);
+ }
+ }
}
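
Taken together with the flush_dcache_page() change earlier in this series, this gives ARC a lazy D-cache/I-cache coherence scheme keyed off PG_arch_1. A sketch of the lifecycle for a page that ends up mapped executable (outline only, condensed from the two hunks):

	/*
	 * 1. Kernel writes the page through its kernel mapping (e.g. readpage).
	 * 2. flush_dcache_page(page) just does set_bit(PG_arch_1, &page->flags);
	 *    no cache op is issued yet.
	 * 3. The page is later faulted into a VM_EXEC vma.
	 * 4. update_mmu_cache() pre-installs the TLB entry and, seeing PG_arch_1
	 *    set, finally does:
	 *        __flush_dcache_page(paddr);       wback+inv the kernel alias
	 *        __inv_icache_page(paddr, vaddr);  kill stale I$ lines at vaddr
	 *
	 * Pages never mapped executable pay nothing beyond the bit set in step 2.
	 */
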
/* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
-void __init read_decode_mmu_bcr(void)
+void __cpuinit read_decode_mmu_bcr(void)
{
unsigned int tmp;
struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
- struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
p_mmu->ver, TO_KB(p_mmu->pg_sz));
return buf;
}
-void __init arc_mmu_init(void)
+void __cpuinit arc_mmu_init(void)
{
char str[256];
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
.init_smp = iss_model_init_smp,
#endif
MACHINE_END
+
+static const char *nsimosci_compat[] __initdata = {
+ "snps,nsimosci",
+ NULL,
+};
+
+MACHINE_START(NSIMOSCI, "nsimosci")
+ .dt_compat = nsimosci_compat,
+ .init_early = NULL,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = NULL,
+MACHINE_END
--- /dev/null
+# Abilis Systems TB10x platform kernel configuration file
+#
+# Author: Christian Ruppert <christian.ruppert@abilis.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+menuconfig ARC_PLAT_TB10X
+ bool "Abilis TB10x"
+ select COMMON_CLK
+ select PINCTRL
+ select PINMUX
+ select ARCH_REQUIRE_GPIOLIB
+ help
+ Support for platforms based on the TB10x home media gateway SOC by
+ Abilis Systems. TB10x is based on the ARC700 CPU architecture.
+ Say Y if you are building a kernel for one of the SOCs in this
+ series (e.g. TB100 or TB101). If in doubt say N.
+
+if ARC_PLAT_TB10X
+
+config GENERIC_GPIO
+ def_bool y
+
+endif
--- /dev/null
+# Abilis Systems TB10x platform Makefile
+#
+# Author: Christian Ruppert <christian.ruppert@abilis.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+KBUILD_CFLAGS += -Iarch/arc/plat-tb10x/include
+
+obj-y += tb10x.o
--- /dev/null
+/*
+ * Abilis Systems TB10x platform initialisation
+ *
+ * Copyright (C) Abilis Systems 2012
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/clk-provider.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/mach_desc.h>
+
+
+static void __init tb10x_platform_init(void)
+{
+ of_clk_init(NULL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static void __init tb10x_platform_late_init(void)
+{
+ struct device_node *dn;
+
+ /*
+ * Pinctrl documentation recommends setting up the iomux here for
+ * all modules which don't require control over the pins themselves.
+ * Modules which need this kind of assistance are compatible with
+ * "abilis,simple-pinctrl", i.e. we can easily iterate over them.
+ * TODO: Does this recommended method work cleanly with pins required
+ * by modules?
+ */
+ for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") {
+ struct platform_device *pd = of_find_device_by_node(dn);
+ struct pinctrl *pctl;
+
+ pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default");
+ if (IS_ERR(pctl)) {
+ int ret = PTR_ERR(pctl);
+ dev_err(&pd->dev, "Could not set up pinctrl: %d\n",
+ ret);
+ }
+ }
+}
+
+static const char *tb10x_compat[] __initdata = {
+ "abilis,arc-tb10x",
+ NULL,
+};
+
+MACHINE_START(TB10x, "tb10x")
+ .dt_compat = tb10x_compat,
+ .init_machine = tb10x_platform_init,
+ .init_late = tb10x_platform_late_init,
+MACHINE_END
config SYS_SUPPORTS_APM_EMULATION
bool
-config GENERIC_GPIO
- bool
-
config HAVE_TCM
bool
select GENERIC_ALLOCATOR
bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
default y
select ARCH_MULTI_V6_V7
- select ARCH_VEXPRESS
select CPU_V7
config ARCH_MULTI_V6_V7
0x44d80000 0x2000>; /* M3 DMEM */
ti,hwmods = "wkup_m3";
};
+
+ gpmc: gpmc@50000000 {
+ compatible = "ti,am3352-gpmc";
+ ti,hwmods = "gpmc";
+ reg = <0x50000000 0x2000>;
+ interrupts = <100>;
+ num-cs = <7>;
+ num-waitpins = <2>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ status = "disabled";
+ };
};
};
i2c@12C70000 {
samsung,i2c-sda-delay = <100>;
samsung,i2c-max-bus-freq = <378000>;
+
+ trackpad {
+ reg = <0x67>;
+ compatible = "cypress,cyapa";
+ interrupts = <2 0>;
+ interrupt-parent = <&gpx1>;
+ wakeup-source;
+ };
};
i2c@12C80000 {
};
usb@12110000 {
- samsung,vbus-gpio = <&gpx2 6 1 3 3>;
+ samsung,vbus-gpio = <&gpx2 6 0>;
};
dp-controller {
};
usb@12110000 {
- samsung,vbus-gpio = <&gpx1 1 1 3 3>;
+ samsung,vbus-gpio = <&gpx1 1 0>;
};
fixed-rate-clocks {
&usb_otg_hs {
interface-type = <0>;
+ usb-phy = <&usb2_phy>;
mode = <3>;
power = <50>;
};
&usb_otg_hs {
interface-type = <0>;
+ usb-phy = <&usb2_phy>;
mode = <3>;
power = <50>;
};
&usb_otg_hs {
interface-type = <0>;
+ usb-phy = <&usb2_phy>;
mode = <3>;
power = <50>;
};
interrupts = <0 92 0x4>, <0 93 0x4>;
interrupt-names = "mc", "dma";
ti,hwmods = "usb_otg_hs";
- usb-phy = <&usb2_phy>;
multipoint = <1>;
num-eps = <16>;
ram-bits = <12>;
cpu@0 {
operating-points = <
/* kHz uV */
- 300000 975000
- 600000 1075000
- 800000 1200000
+ 300000 1012500
+ 600000 1200000
+ 800000 1325000
>;
clock-latency = <300000>; /* From legacy driver */
};
>;
};
+ mcspi1_pins: pinmux_mcspi1_pins {
+ pinctrl-single,pins = <
+ 0xf2 0x100 /* mcspi1_clk.mcspi1_clk INPUT | MODE0 */
+ 0xf4 0x100 /* mcspi1_somi.mcspi1_somi INPUT | MODE0 */
+ 0xf6 0x100 /* mcspi1_simo.mcspi1_simo INPUT | MODE0 */
+ 0xf8 0x100 /* mcspi1_cs0.mcspi1_cs0 INPUT | MODE0*/
+ >;
+ };
+
dss_hdmi_pins: pinmux_dss_hdmi_pins {
pinctrl-single,pins = <
0x5a 0x118 /* hdmi_cec.hdmi_cec INPUT PULLUP | MODE 0 */
};
&mcspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcspi1_pins>;
+
eth@0 {
compatible = "ks8851";
spi-max-frequency = <24000000>;
reg = <0>;
interrupt-parent = <&gpio2>;
- interrupts = <2>; /* gpio line 34 */
+ interrupts = <2 8>; /* gpio line 34, low triggered */
vdd-supply = <&vdd_eth>;
};
};
spi-max-frequency = <24000000>;
reg = <0>;
interrupt-parent = <&gpio6>;
- interrupts = <11>; /* gpio line 171 */
+ interrupts = <11 8>; /* gpio line 171, low triggered */
vdd-supply = <&vdd_eth>;
};
};
cpu@0 {
operating-points = <
/* kHz uV */
- 350000 975000
- 700000 1075000
- 920000 1200000
+ 350000 1025000
+ 700000 1200000
+ 920000 1313000
>;
clock-latency = <300000>; /* From legacy driver */
};
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_I2C_CHARDEV=y
CONFIG_SPI=y
CONFIG_TWL4030_WATCHDOG=y
CONFIG_MFD_TPS65217=y
CONFIG_MFD_TPS65910=y
+CONFIG_TWL6040_CORE=y
CONFIG_REGULATOR_TWL4030=y
CONFIG_REGULATOR_TPS65023=y
CONFIG_REGULATOR_TPS6507X=y
CONFIG_SND_SOC=m
CONFIG_SND_OMAP_SOC=m
CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
+CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
CONFIG_USB=y
CONFIG_USB_DEBUG=y
unsigned long dt_root;
const char *model;
+#ifdef CONFIG_ARCH_MULTIPLATFORM
+ DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
+ MACHINE_END
+
+ mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT;
+#endif
+
if (!dt_phys)
return NULL;
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
+#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
static int __init customize_machine(void)
{
- /* customizes platform devices, or adds new ones */
+ /*
+ * customizes platform devices, or adds new ones
+ * On DT based machines, we fall back to populating the
+ * machine from the device tree, if no callback is provided,
+ * otherwise we would always need an init_machine callback.
+ */
if (machine_desc->init_machine)
machine_desc->init_machine();
+#ifdef CONFIG_OF
+ else
+ of_platform_populate(NULL, of_default_bus_match_table,
+ NULL, NULL);
+#endif
return 0;
}
arch_initcall(customize_machine);
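
With the of_platform_populate() fallback in place, a DT-only ARM board no longer needs a boilerplate init_machine callback at all. A hypothetical minimal machine record relying on the fallback (board name and compatible string are made up for illustration):

	static const char *myboard_dt_compat[] __initdata = {
		"vendor,myboard",	/* hypothetical compatible string */
		NULL,
	};

	DT_MACHINE_START(MYBOARD_DT, "Hypothetical DT-only board")
		/* no .init_machine: customize_machine() populates from DT */
		.dt_compat = myboard_dt_compat,
	MACHINE_END
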
#define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208)
#define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288)
#define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408)
+#define EXYNOS5_ARM_L2_OPTION S5P_PMUREG(0x2608)
#define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48)
#define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8)
#define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48)
{ EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
{ EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
{ EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS5_ARM_L2_OPTION, { 0x10, 0x10, 0x0 } },
{ EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
{ EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
{ EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
/*
* SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
- * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
*/
tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
- tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
- EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+ tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
__raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
/*
select AUTO_ZRELADDR if !ZBOOT_ROM
select CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select MULTI_IRQ_HANDLER
config ARCH_HAS_RNGA
bool
-config IRAM_ALLOC
- bool
- select GENERIC_ALLOCATOR
-
config HAVE_IMX_ANATOP
bool
obj-$(CONFIG_MXC_TZIC) += tzic.o
obj-$(CONFIG_MXC_AVIC) += avic.o
-obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o
obj-$(CONFIG_MXC_ULPI) += ulpi.o
obj-$(CONFIG_MXC_USE_EPIT) += epit.o
obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
#define __ASM_ARCH_MXC_COMMON_H__
struct platform_device;
+struct pt_regs;
struct clk;
enum mxc_cpu_pwr_mode;
ENDPROC(v7_secondary_startup)
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_ARM_CPU_SUSPEND
/*
* The following code must assume it is running from physical address
* where absolute virtual addresses to the data section have to be
*/
#include <linux/errno.h>
+#include <linux/jiffies.h>
#include <asm/cp15.h>
+#include <asm/proc-fns.h>
#include "common.h"
+++ /dev/null
-/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
-#include "linux/platform_data/imx-iram.h"
-
-static unsigned long iram_phys_base;
-static void __iomem *iram_virt_base;
-static struct gen_pool *iram_pool;
-
-static inline void __iomem *iram_phys_to_virt(unsigned long p)
-{
- return iram_virt_base + (p - iram_phys_base);
-}
-
-void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr)
-{
- if (!iram_pool)
- return NULL;
-
- *dma_addr = gen_pool_alloc(iram_pool, size);
- pr_debug("iram alloc - %dB@0x%lX\n", size, *dma_addr);
- if (!*dma_addr)
- return NULL;
- return iram_phys_to_virt(*dma_addr);
-}
-EXPORT_SYMBOL(iram_alloc);
-
-void iram_free(unsigned long addr, unsigned int size)
-{
- if (!iram_pool)
- return;
-
- gen_pool_free(iram_pool, addr, size);
-}
-EXPORT_SYMBOL(iram_free);
-
-int __init iram_init(unsigned long base, unsigned long size)
-{
- iram_phys_base = base;
-
- iram_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!iram_pool)
- return -ENOMEM;
-
- gen_pool_add(iram_pool, base, size, -1);
- iram_virt_base = ioremap(iram_phys_base, size);
- if (!iram_virt_base)
- return -EIO;
-
- pr_debug("i.MX IRAM pool: %ld KB@0x%p\n", size / 1024, iram_virt_base);
- return 0;
-}
if (ret) {
dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
__func__, pdev->name, pdev->id);
- goto exit_device_put;
+ goto exit_iounmap;
}
p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
__func__, pdev->name);
ret = -ENOMEM;
- goto exit_device_del;
+ goto exit_iounmap;
}
d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
kfree(d);
exit_release_p:
kfree(p);
-exit_device_del:
- platform_device_del(pdev);
+exit_iounmap:
+ iounmap(dma_base);
exit_device_put:
platform_device_put(pdev);
select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
select PM_RUNTIME
select REGULATOR
- select SERIAL_OMAP
- select SERIAL_OMAP_CONSOLE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
select VFP
# SMP support ONLY available for OMAP4
-obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
-obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
+smp-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
+smp-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
omap-4-5-common = omap4-common.o omap-wakeupgen.o \
sleep44xx.o
-obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common)
-obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common)
+obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) $(smp-y)
+obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) $(smp-y)
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
*/
static struct {
int mmc1_gpio_wp;
- int usb_pwr_level;
+ bool usb_pwr_level; /* 0 - Active Low, 1 - Active High */
int dvi_pd_gpio;
int usr_button_gpio;
int mmc_caps;
} beagle_config = {
.mmc1_gpio_wp = -EINVAL,
- .usb_pwr_level = GPIOF_OUT_INIT_LOW,
+ .usb_pwr_level = 0,
.dvi_pd_gpio = -EINVAL,
.usr_button_gpio = 4,
.mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
case 0:
printk(KERN_INFO "OMAP3 Beagle Rev: xM Ax/Bx\n");
omap3_beagle_version = OMAP3BEAGLE_BOARD_XM;
- beagle_config.usb_pwr_level = GPIOF_OUT_INIT_HIGH;
+ beagle_config.usb_pwr_level = 1;
beagle_config.mmc_caps &= ~MMC_CAP_8_BIT_DATA;
break;
case 2:
#define LIS302_IRQ1_GPIO 181
#define LIS302_IRQ2_GPIO 180 /* Not yet in use */
-/* list all spi devices here */
+/* List all SPI devices here. Note that the list/probe order seems to matter! */
enum {
RX51_SPI_WL1251,
- RX51_SPI_MIPID, /* LCD panel */
RX51_SPI_TSC2005, /* Touch Controller */
+ RX51_SPI_MIPID, /* LCD panel */
};
static struct wl12xx_platform_data wl1251_pdata;
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/of.h>
#include <linux/omap-dma.h>
#include "soc.h"
if (res)
return res;
+ if (of_have_populated_dt())
+ return res;
+
pdev = platform_device_register_full(&omap_dma_dev_info);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return ret;
}
- for_each_node_by_name(child, "nand") {
- ret = gpmc_probe_nand_child(pdev, child);
- if (ret < 0) {
- of_node_put(child);
- return ret;
- }
- }
+ for_each_child_of_node(pdev->dev.of_node, child) {
- for_each_node_by_name(child, "onenand") {
- ret = gpmc_probe_onenand_child(pdev, child);
- if (ret < 0) {
- of_node_put(child);
- return ret;
- }
- }
+ if (!child->name)
+ continue;
- for_each_node_by_name(child, "nor") {
- ret = gpmc_probe_generic_child(pdev, child);
- if (ret < 0) {
- of_node_put(child);
- return ret;
- }
- }
+ if (of_node_cmp(child->name, "nand") == 0)
+ ret = gpmc_probe_nand_child(pdev, child);
+ else if (of_node_cmp(child->name, "onenand") == 0)
+ ret = gpmc_probe_onenand_child(pdev, child);
+ else if (of_node_cmp(child->name, "ethernet") == 0 ||
+ of_node_cmp(child->name, "nor") == 0)
+ ret = gpmc_probe_generic_child(pdev, child);
- for_each_node_by_name(child, "ethernet") {
- ret = gpmc_probe_generic_child(pdev, child);
- if (ret < 0) {
+ if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
+ __func__, child->full_name))
of_node_put(child);
- return ret;
- }
}
return 0;
cpu_rev = "1.0";
break;
case 1:
- /* FALLTHROUGH */
- default:
omap_revision = AM335X_REV_ES2_0;
cpu_rev = "2.0";
break;
+ case 2:
+ /* FALLTHROUGH */
+ default:
+ omap_revision = AM335X_REV_ES2_1;
+ cpu_rev = "2.1";
+ break;
}
break;
case 0xb8f2:
soc_dev_attr->revision = soc_rev;
soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR_OR_NULL(soc_dev)) {
+ if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return;
}
parent = soc_device_to_device(soc_dev);
- if (!IS_ERR_OR_NULL(parent))
- device_create_file(parent, &omap_soc_attr);
+ device_create_file(parent, &omap_soc_attr);
}
#endif /* CONFIG_SOC_BUS */
#define OMAP3_CONTROL_PADCONF_SAD2D_SWAKEUP_OFFSET 0xa1c
#define OMAP3_CONTROL_PADCONF_JTAG_RTCK_OFFSET 0xa1e
#define OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET 0xa20
+#define OMAP3_CONTROL_PADCONF_GPIO_127 0xa24
+#define OMAP3_CONTROL_PADCONF_GPIO_126 0xa26
+#define OMAP3_CONTROL_PADCONF_GPIO_128 0xa28
+#define OMAP3_CONTROL_PADCONF_GPIO_129 0xa2a
#define OMAP3_CONTROL_PADCONF_MUX_SIZE \
- (OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET + 0x2)
+ (OMAP3_CONTROL_PADCONF_GPIO_129 + 0x2)
bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
return 0;
}
-omap_late_initcall(omap_device_late_init);
+omap_late_initcall_sync(omap_device_late_init);
#define AM335X_CLASS 0x33500033
#define AM335X_REV_ES1_0 AM335X_CLASS
#define AM335X_REV_ES2_0 (AM335X_CLASS | (0x1 << 8))
+#define AM335X_REV_ES2_1 (AM335X_CLASS | (0x2 << 8))
#define OMAP443X_CLASS 0x44300044
#define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8))
#define omap_subsys_initcall(fn) omap_initcall(subsys_initcall, fn)
#define omap_device_initcall(fn) omap_initcall(device_initcall, fn)
#define omap_late_initcall(fn) omap_initcall(late_initcall, fn)
+#define omap_late_initcall_sync(fn) omap_initcall(late_initcall_sync, fn)
#endif /* __ASSEMBLY__ */
select CPU_V7
select HAVE_ARM_SCU if SMP
select HAVE_SMP
- select SMP_ON_UP
+ select SMP_ON_UP if SMP
help
Support for CSR SiRFSoC ARM Cortex A9 Platform
select MTD
select MTD_CFI
select MTD_CFI_INTELEXT
- select MTD_CHAR
select MTD_PHYSMAP
select PXA25x
select SMC91X
* write alloc and 'Full line of zero' options
*
*/
+ if (!IS_ENABLED(CONFIG_CACHE_L2X0))
+ return;
writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
select ARM_ARCH_TIMER
select ARM_GIC
select ARM_L1_CACHE_SHIFT_6
+ select CPU_FREQ_TABLE if CPU_FREQ
select CPU_V7
select PINCTRL
select PINCTRL_TEGRA114
config UX500_SOC_COMMON
bool
default y
+ select ABX500_CORE
+ select AB8500_CORE
select ARM_ERRATA_754322
select ARM_ERRATA_764369 if SMP
select ARM_GIC
"no regulator\n");
return PTR_ERR(prox_regulator);
}
- regulator_enable(prox_regulator);
- return 0;
+
+ return regulator_enable(prox_regulator);
}
static void mop500_prox_deactivate(struct device *dev)
/* Throw these device-specific numbers into the entropy pool */
add_device_randomness(uid, 0x14);
return kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
- readl((u32 *)uid+1),
+ readl((u32 *)uid+0),
readl((u32 *)uid+1), readl((u32 *)uid+2),
readl((u32 *)uid+3), readl((u32 *)uid+4));
}
#
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
-orion-gpio-$(CONFIG_GENERIC_GPIO) += gpio.o
+orion-gpio-$(CONFIG_GPIOLIB) += gpio.o
obj-$(CONFIG_PLAT_ORION_LEGACY) += irq.o pcie.o time.o common.o mpp.o
obj-$(CONFIG_PLAT_ORION_LEGACY) += $(orion-gpio-y)
}
/*
- * GENERIC_GPIO primitives.
+ * GPIO primitives.
*/
static int orion_gpio_request(struct gpio_chip *chip, unsigned pin)
{
config IOMMU_HELPER
def_bool SWIOTLB
-config GENERIC_GPIO
- bool
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
There is an AVR32 Linux project with a web page at
http://avr32linux.org/.
-config GENERIC_GPIO
- def_bool y
-
config STACKTRACE_SUPPORT
def_bool y
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select ARCH_HAVE_CUSTOM_GPIO_H
- select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_REQUIRE_GPIOLIB
select HAVE_UID16
select HAVE_UNDERSCORE_SYMBOL_PREFIX
select VIRT_TO_BUS
config ZONE_DMA
def_bool y
-config GENERIC_GPIO
- def_bool y
-
config FORCE_MAX_ZONEORDER
int
default "14"
select MTD_CFI
select MTD_CFI_AMDSTD
select MTD_JEDECPROBE if ETRAX_ARCH_V32
- select MTD_CHAR
select MTD_BLOCK
select MTD_COMPLEX_MAPPINGS
help
select MTD_CFI
select MTD_CFI_AMDSTD
select MTD_JEDECPROBE
- select MTD_CHAR
select MTD_BLOCK
select MTD_COMPLEX_MAPPINGS
help
source "kernel/Kconfig.hz"
-config GENERIC_GPIO
- def_bool n
-
endmenu
source "init/Kconfig"
config HAVE_SETUP_PER_CPU_AREA
def_bool y
-config GENERIC_GPIO
- bool
-
config DMI
bool
default y
config ARCH_HAS_ILOG2_U64
bool
-config GENERIC_GPIO
- bool
-
config GENERIC_HWEIGHT
bool
default y
config COLDFIRE
bool "Coldfire CPU family support"
- select GENERIC_GPIO
- select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_REQUIRE_GPIOLIB
select ARCH_HAVE_CUSTOM_GPIO_H
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_GPIO
- def_bool n
-
config NO_IOPORT
def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_GPIO
- bool
-
config GENERIC_CSUM
def_bool y
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_APM_EMULATION
- select GENERIC_GPIO
- select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_REQUIRE_GPIOLIB
select SYS_SUPPORTS_ZBOOT
select USB_ARCH_HAS_OHCI
select USB_ARCH_HAS_EHCI
select SYS_SUPPORTS_ZBOOT_UART16550
select DMA_NONCOHERENT
select IRQ_CPU
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select SYS_HAS_EARLY_PRINTK
select HAVE_PWM
bool
config GPIO_TXX9
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
bool
config ISA_DMA_API
bool
-config GENERIC_GPIO
- bool
-
config HOLES_IN_ZONE
bool
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_BIG_ENDIAN
- select GENERIC_GPIO
select CPU_MIPSR2_IRQ_VI
config SOC_PNX8335
bool "Loongson 2F"
depends on SYS_HAS_CPU_LOONGSON2F
select CPU_LOONGSON2
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
The Loongson 2F processor implements the MIPS III instruction set
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
pci.o bonito-irq.o mem.o machtype.o platform.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_GPIOLIB) += gpio.o
#
# Serial port support
/* GPIO support */
-#ifdef CONFIG_GENERIC_GPIO
+#ifdef CONFIG_GPIOLIB
int gpio_to_irq(unsigned gpio)
{
return -EINVAL;
config NO_IOPORT
def_bool y
-config GENERIC_GPIO
- def_bool y
-
config TRACE_IRQFLAGS_SUPPORT
def_bool y
bool
default y
-config GENERIC_GPIO
- bool
- help
- Generic GPIO API support
-
config PPC
bool
default y
bool "PPC4xx GPIO support"
depends on 40x
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
Enable gpiolib support for ppc40x based boards
bool "PPC4xx GPIO support"
depends on 44x
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
Enable gpiolib support for ppc440 based boards
select DEFAULT_UIMAGE
select SWIOTLB
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
help
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select GENERIC_GPIO
+ select GPIOLIB
select ARCH_REQUIRE_GPIOLIB
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
bool "GE PPC9A"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
help
bool "GE SBC310"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
help
bool "GE SBC610"
select DEFAULT_UIMAGE
select MMIO_NVRAM
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select GE_FPGA
select HAS_RAPIDIO
config 8xx_GPIO
bool "GPIO API Support"
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Saying Y here will cause the ports on an MPC8xx processor to be used
config QE_GPIO
bool "QE GPIO support"
depends on QUICC_ENGINE
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here if you're going to use hardware that connects to the
select PPC_LIB_RHEAP
select PPC_PCI_CHOICE
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
The CPM2 (Communications Processor Module) is a coprocessor on
embedded CPUs made by Freescale. Selecting this option means that
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
depends on PPC
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here to support simple, memory-mapped GPIO controllers.
config MCU_MPC8349EMITX
bool "MPC8349E-mITX MCU driver"
depends on I2C=y && PPC_83xx
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Say Y here to enable soft power-off functionality on the Freescale
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_GPIO
- def_bool n
-
config GENERIC_CALIBRATE_DELAY
bool
obj-y := fpga.o irq.o nmi.o setup.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_GPIOLIB) += gpio.o
obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
obj-y += setup.o ilsel.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_GPIOLIB) += gpio.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7264) := pinmux-sh7264.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7269) := pinmux-sh7269.o
-obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
+obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o
obj-y += $(clock-y)
-obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
+obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
obj-y += $(clock-y)
obj-$(CONFIG_SMP) += $(smp-y)
-obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
+obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o
bool
default y if SPARC32
-config GENERIC_GPIO
- bool
- help
- Generic GPIO API support
-
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y if SPARC64
config KERNEL_PL
int "Processor protection level for kernel"
range 1 2
- default "1"
+ default 2 if TILEGX
+ default 1 if !TILEGX
---help---
- This setting determines the processor protection level the
- kernel will be built to run at. Generally you should use
- the default value here.
+ Since MDE 4.2, the Tilera hypervisor runs the kernel
+ at PL2 by default. If running under an older hypervisor,
+ or as a KVM guest, you must run at PL1. (The current
+ hypervisor may also be recompiled with "make HV_PL=2" to
+ allow it to run a kernel at PL1, but clients running at PL1
+ are not expected to be supported indefinitely.)
+
+ If you're not sure, don't change the default.
source "arch/tile/gxio/Kconfig"
#define HV_DISPATCH_ENTRY_SIZE 32
/** Version of the hypervisor interface defined by this file */
-#define _HV_VERSION 11
+#define _HV_VERSION 13
+
+/** Last version of the hypervisor interface with old hv_init() ABI.
+ *
+ * The change from version 12 to version 13 corresponds to launching
+ * the client by default at PL2 instead of PL1 (corresponding to the
+ * hv itself running at PL3 instead of PL2). To make this explicit,
+ * the hv_init() API was also extended so the client can report its
+ * desired PL, resulting in a more helpful failure diagnostic. If you
+ * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl
+ * argument, the hypervisor will assume client_pl = 1.
+ *
+ * Note that this is a deprecated solution and we do not expect to
+ * support clients of the Tilera hypervisor running at PL1 indefinitely.
+ */
+#define _HV_VERSION_OLD_HV_INIT 12
/* Index into hypervisor interface dispatch code blocks.
*
#ifndef __ASSEMBLER__
/** Pass HV_VERSION to hv_init to request this version of the interface. */
-typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
+typedef enum {
+ HV_VERSION = _HV_VERSION,
+ HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT,
+
+} HV_VersionNumber;
/** Initializes the hypervisor.
*
* that this program expects, typically HV_VERSION.
* @param chip_num Architecture number of the chip the client was built for.
* @param chip_rev_num Revision number of the chip the client was built for.
+ * @param client_pl Privilege level the client is built for
+ * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT).
*/
void hv_init(HV_VersionNumber interface_version_number,
- int chip_num, int chip_rev_num);
+ int chip_num, int chip_rev_num, int client_pl);
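
As a minimal C sketch (not part of these patches) of how a client might call the extended hv_init(): the version fallback mirrors the head_32.S hunk below, and it is assumed that KERNEL_PL, TILE_CHIP and TILE_CHIP_REV are provided by the usual tile headers.

 #include <hv/hypervisor.h>

 /* Announce this client to the hypervisor using the extended ABI. */
 static void announce_client(void)
 {
 #if KERNEL_PL == 1 && _HV_VERSION == 13
 	/* Older hypervisor: use the old ABI; client_pl is then implied (1). */
 	hv_init(HV_VERSION_OLD_HV_INIT, TILE_CHIP, TILE_CHIP_REV, 1);
 #else
 	hv_init(HV_VERSION, TILE_CHIP, TILE_CHIP_REV, KERNEL_PL);
 #endif
 }
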
/** Queries we can make for hv_sysconf().
movei r2, TILE_CHIP_REV
}
{
- moveli r0, _HV_VERSION
+ moveli r0, _HV_VERSION_OLD_HV_INIT
jal hv_init
}
/* Get a reasonable default ASID in r0 */
ENTRY(_start)
/* Notify the hypervisor of what version of the API we want */
{
+#if KERNEL_PL == 1 && _HV_VERSION == 13
+ /* Support older hypervisors by asking for API version 12. */
+ movei r0, _HV_VERSION_OLD_HV_INIT
+#else
+ movei r0, _HV_VERSION
+#endif
movei r1, TILE_CHIP
- movei r2, TILE_CHIP_REV
}
{
- moveli r0, _HV_VERSION
- jal hv_init
+ movei r2, TILE_CHIP_REV
+ movei r3, KERNEL_PL
}
+ jal hv_init
/* Get a reasonable default ASID in r0 */
{
move r0, zero
* preserve the semantic that the same read lock can be acquired in an
* interrupt context.
*/
-inline int arch_read_trylock(arch_rwlock_t *rwlock)
+int arch_read_trylock(arch_rwlock_t *rwlock)
{
u32 val;
__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
designs licensed by PKUnity Ltd.
Please see web page at <http://www.pkunity.com/>.
-config GENERIC_GPIO
- def_bool y
-
config GENERIC_CSUM
def_bool y
config LEDS
def_bool y
- depends on GENERIC_GPIO
+ depends on GPIOLIB
config ALIGNMENT_TRAP
def_bool y
config PUV3_GPIO
bool
depends on !ARCH_FPGA
- select GENERIC_GPIO
select GPIO_SYSFS
default y
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_GPIO
- bool
-
config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API
*/
static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
{
+ if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+ || devfn == PCI_DEVFN(0, 0)
+ || devfn == PCI_DEVFN(3, 0)))
+ return 1;
+
/* This is a workaround for A0 LNC bug where PCI status register does
* not have new CAP bit set. can not be written by SW either.
*
*/
if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
return 0;
- if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
- || devfn == PCI_DEVFN(0, 0)
- || devfn == PCI_DEVFN(3, 0)))
- return 1;
+
return 0; /* langwell on others */
}
-config FRAME_POINTER
- def_bool n
-
config ZONE_DMA
def_bool y
config XTENSA
def_bool y
+ select ARCH_WANT_FRAME_POINTERS
select HAVE_IDE
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_GPIO
- bool
-
config ARCH_HAS_ILOG2_U32
def_bool n
source "init/Kconfig"
source "kernel/Kconfig.freezer"
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
config MMU
def_bool n
help
Can we use information of configuration file?
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ bool "Initialize Xtensa MMU inside the Linux kernel code"
+ default y
+ help
+ Earlier versions initialized the MMU in the exception vector
+ before jumping to _startup in head.S and had the advantage that
+ it was possible to place a software breakpoint at 'reset' and
+ then enter your normal kernel breakpoints once the MMU was mapped
+ to the kernel mappings (0XC0000000).
+
+ This unfortunately doesn't work for U-Boot and likely also won't
+ work for using KEXEC to have a hot kernel ready for doing a
+ KDUMP.
+
+ So now the MMU is initialized in head.S but it's necessary to
+ use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+ xt-gdb can't place a software breakpoint in the 0XD region prior
+ to mapping the MMU; after mapping, even if the area of low memory
+ was mapped, gdb wouldn't remove the breakpoint on hitting it, as the
+ PC wouldn't match. Since hardware breakpoints are recommended for
+ Linux configurations, it seems reasonable to just assume they exist
+ and leave this older mechanism for unfortunate souls that choose
+ not to follow Tensilica's recommendation.
+
+ Selecting this will cause U-Boot to set the KERNEL Load and Entry
+ address at 0x00003000 instead of the mapped address of 0xD0003000.
+
+ If in doubt, say Y.
+
endmenu
config XTENSA_CALIBRATE_CCOUNT
menu "Executable file formats"
-# only elf supported
-config KCORE_ELF
- def_bool y
- depends on PROC_FS
- help
- If you enabled support for /proc file system then the file
- /proc/kcore will contain the kernel core image in ELF format. This
- can be used in gdb:
-
- $ cd /usr/src/linux ; gdb vmlinux /proc/kcore
-
- This is especially useful if you have compiled the kernel with the
- "-g" option to preserve debugging information. It is mainly used
- for examining kernel data structures on the live kernel.
-
source "fs/Kconfig.binfmt"
endmenu
export OBJCOPY_ARGS
export CPPFLAGS_boot.lds += -P -C
+export KBUILD_AFLAGS += -mtext-section-literals
boot-y := bootstrap.o
-#include <variant/core.h>
+/*
+ * linux/arch/xtensa/boot/boot-elf/boot.lds.S
+ *
+ * Copyright (C) 2008 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/vectors.h>
OUTPUT_ARCH(xtensa)
ENTRY(_ResetVector)
SECTIONS
{
- .start 0xD0000000 : { *(.start) }
-
- .text 0xD0000000:
- {
- __reloc_start = . ;
- _text_start = . ;
- *(.literal .text.literal .text)
- _text_end = . ;
- }
-
- .rodata ALIGN(0x04):
- {
- *(.rodata)
- *(.rodata1)
- }
-
- .data ALIGN(0x04):
+ .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
{
- *(.data)
- *(.data1)
- *(.sdata)
- *(.sdata2)
- *(.got.plt)
- *(.got)
- *(.dynamic)
+ *(.ResetVector.text)
}
- __reloc_end = . ;
-
- . = ALIGN(0x10);
- __image_load = . ;
- .image 0xd0001000:
+ .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
{
_image_start = .;
*(image)
_image_end = . ;
}
-
.bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
{
__bss_start = .;
*(.bss)
__bss_end = .;
}
- _end = .;
- _param_start = .;
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ /*
+ * This is a remapped copy of the Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ */
+ .ResetVector.remapped_text 0x46000000 (INFO):
{
- *(.ResetVector.text)
+ *(.ResetVector.remapped_text)
}
-
-
- PROVIDE (end = .);
}
+/*
+ * arch/xtensa/boot/boot-elf/bootstrap.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Piet Delaney <piet@tensilica.com>
+ */
#include <asm/bootparam.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <linux/linkage.h>
-
-/* ResetVector
- */
- .section .ResetVector.text, "ax"
+ .section .ResetVector.text, "ax"
.global _ResetVector
+ .global reset
+
_ResetVector:
- _j reset
+ _j _SetupMMU
+
+ .begin no-absolute-literals
+ .literal_position
+
.align 4
RomInitAddr:
- .word 0xd0001000
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+ XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ .word 0x00003000
+#else
+ .word 0xd0003000
+#endif
RomBootParam:
.word _bootparam
+_bootparam:
+ .short BP_TAG_FIRST
+ .short 4
+ .long BP_VERSION
+ .short BP_TAG_LAST
+ .short 0
+ .long 0
+
+ .align 4
+_SetupMMU:
+ movi a0, 0
+ wsr a0, windowbase
+ rsync
+ movi a0, 1
+ wsr a0, windowstart
+ rsync
+ movi a0, 0x1F
+ wsr a0, ps
+ rsync
+
+ Offset = _SetupMMU - _ResetVector
+
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
+
+ rsil a0, XCHAL_DEBUGLEVEL-1
+ rsync
reset:
l32r a0, RomInitAddr
l32r a2, RomBootParam
jx a0
.align 4
- .section .bootstrap.data, "aw"
- .globl _bootparam
-_bootparam:
- .short BP_TAG_FIRST
- .short 4
- .long BP_VERSION
- .short BP_TAG_LAST
- .short 0
- .long 0
+ .section .ResetVector.remapped_text, "x"
+ .global _RemappedResetVector
+
+ /* Do org before literals */
+ .org 0
+
+_RemappedResetVector:
+ .begin no-absolute-literals
+ .literal_position
+
+ _j _RemappedSetupMMU
+
+ /* Position Remapped code at the same location as the original code */
+ . = _RemappedResetVector + Offset
+
+_RemappedSetupMMU:
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
. = ALIGN(0x10);
__image_load = . ;
- .image 0xd0001000: AT(__image_load)
+ .image 0xd0003000: AT(__image_load)
{
_image_start = .;
*(image)
# for more details.
#
-UIMAGE_LOADADDR = 0xd0001000
+ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+UIMAGE_LOADADDR = 0x00003000
+else
+UIMAGE_LOADADDR = 0xd0003000
+endif
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_GPIO=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_GPIO=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_NO_IOPORT=y
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
+generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += percpu.h
-/* empty */
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#define HAVE_ARCH_CALLER_ADDR
+#define CALLER_ADDR0 ({ unsigned long a0, a1; \
+ __asm__ __volatile__ ( \
+ "mov %0, a0\n" \
+ "mov %1, a1\n" \
+ : "=r"(a0), "=r"(a1) : : ); \
+ MAKE_PC_FROM_RA(a0, a1); })
+#ifdef CONFIG_FRAME_POINTER
+extern unsigned long return_address(unsigned level);
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#else
+#define CALLER_ADDR1 (0)
+#define CALLER_ADDR2 (0)
+#define CALLER_ADDR3 (0)
+#endif
+
+#endif /* _XTENSA_FTRACE_H */
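
A tiny hypothetical usage sketch (not in the patch) of the new helpers: with CONFIG_FRAME_POINTER enabled, CALLER_ADDR1/CALLER_ADDR2 resolve to return_address(1)/(2), so a function can report who invoked it.

 #include <linux/kernel.h>
 #include <asm/ftrace.h>

 static void report_callers(void)
 {
 	/* %pS resolves the addresses to kernel symbols when possible. */
 	pr_info("entered from %pS (caller's caller: %pS)\n",
 		(void *)CALLER_ADDR1, (void *)CALLER_ADDR2);
 }
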
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
#ifdef __ASSEMBLY__
#define XTENSA_HWVERSION_RC_2009_0 230000
* (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
*/
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1: movi a2, 0x10000000
+ movi a3, 0x18000000
+ add a2, a2, a0
+9: bgeu a2, a3, 9b /* PC is out of the expected range */
+
+ /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+ movi a2, 0x40000006
+ idtlb a2
+ iitlb a2
+ isync
+
+ /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+ * and jump to the new mapping.
+ */
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, -1
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -6
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the old V2 mappings. */
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, 0xd0000005
+ movi a4, CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xd8000005
+ movi a4, CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xe0000006
+ movi a4, 0xf0000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xf0000006
+ movi a4, 0xf0000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using MMU v2 mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ movi a2, VECBASE_RESET_VADDR
+ wsr a2, vecbase
+
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
.endm
#endif /*__ASSEMBLY__*/
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return (flags & 0xf) != 0;
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+ return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
}
static inline bool arch_irqs_disabled(void)
+++ /dev/null
-/*
- * include/asm-xtensa/linkage.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_LINKAGE_H
-#define _XTENSA_LINKAGE_H
-
-/* Nothing to do here ... */
-
-#endif /* _XTENSA_LINKAGE_H */
--- /dev/null
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+ unsigned long pc;
+ unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+ unsigned long *sp;
+
+ if (!task || task == current)
+ __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+ else
+ sp = (unsigned long *)task->thread.sp;
+
+ return sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
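
To illustrate the new interface (this example is not part of the series): a caller supplies a callback that is invoked once per frame and returns non-zero to stop the walk, the same pattern stacktrace.c and traps.c use below.

 #include <linux/kernel.h>
 #include <asm/stacktrace.h>

 static int count_kernel_frames_cb(struct stackframe *frame, void *data)
 {
 	unsigned int *count = data;

 	if (kernel_text_address(frame->pc))
 		(*count)++;
 	return 0;	/* 0 = keep walking, non-zero = stop */
 }

 static unsigned int count_kernel_frames(void)
 {
 	unsigned int count = 0;

 	/* stack_pointer(NULL) yields the current task's stack pointer. */
 	walk_stackframe(stack_pointer(NULL), count_kernel_frames_cb, &count);
 	return count;
 }
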
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
-#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+#if XCHAL_NUM_TIMERS > 0 && \
+ INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 0
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+#elif XCHAL_NUM_TIMERS > 1 && \
+ INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+#elif XCHAL_NUM_TIMERS > 2 && \
+ INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
static inline void spill_registers(void)
{
- unsigned int a0, ps;
__asm__ __volatile__ (
- "movi a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t"
+ "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
"mov a12, a0\n\t"
"rsr a13, sar\n\t"
"xsr a14, ps\n\t"
"mov a0, a12\n\t"
"wsr a13, sar\n\t"
"wsr a14, ps\n\t"
- : : "a" (&a0), "a" (&ps)
+ : :
#if defined(CONFIG_FRAME_POINTER)
: "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
#else
--- /dev/null
+/*
+ * arch/xtensa/include/asm/xchal_vaddr_remap.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0XF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+
+#if defined(CONFIG_MMU)
+
+/* Will Become VECBASE */
+#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
+
+/* Image Virtual Start Address */
+#define KERNELOFFSET 0xD0003000
+
+#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
+ #define PHYSICAL_MEMORY_ADDRESS 0x00000000
+ #define LOAD_MEMORY_ADDRESS 0x00003000
+#else
+ /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
+ #define PHYSICAL_MEMORY_ADDRESS 0xD0000000
+ #define LOAD_MEMORY_ADDRESS 0xD0003000
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+ /* MMU Not being used - Virtual == Physical */
+
+ /* VECBASE */
+ #define VIRTUAL_MEMORY_ADDRESS 0x00002000
+
+ /* Location of the start of the kernel text, _start */
+ #define KERNELOFFSET 0x00003000
+ #define PHYSICAL_MEMORY_ADDRESS 0x00000000
+
+ /* Loaded just above possibly live vectors */
+ #define LOAD_MEMORY_ADDRESS 0x00003000
+
+#endif /* CONFIG_MMU */
+
+#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
+#define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset)
+
+/* Used to set VECBASE register */
+#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
+
+#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
+ VECBASE_RESET_VADDR)
+#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
+
+#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
+ VECBASE_RESET_VADDR)
+#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
+
+#if XCHAL_HAVE_VECBASE
+
+#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
+
+#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
+
+#undef XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
+
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_RESET_VECTOR0_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+#undef XCHAL_NMI_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
extra-y := head.o vmlinux.lds
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
- setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
- pci-dma.o
+obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+ ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+ vectors.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+AFLAGS_head.o += -mtext-section-literals
+
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal section must be combined in parenthesis
* so we can allow exceptions and interrupts (*) again.
* Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
*
- * (*) We only allow interrupts of higher priority than current IRQ
+ * (*) We only allow interrupts if they were previously enabled and
+ * we're not handling an IRQ
*/
rsr a3, ps
- addi a0, a0, -4
- movi a2, 1
+ addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
+ movi a2, LOCKLEVEL
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
# a3 = PS.INTLEVEL
- movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority
- moveqz a3, a2, a0 # a3 = IRQ level iff interrupt
+ moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a0, exccause
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l32i a4, a1, PT_DEPC
+ /* Double exception means we came here with an exception
+ * while PS.EXCM was set, i.e. interrupts disabled.
+ */
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a4, a1, PT_EXCCAUSE
+ bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+ /* We came here with an interrupt means interrupts were enabled
+ * and we've just disabled them.
+ */
+ movi a4, trace_hardirqs_off
+ callx4 a4
+1:
+#endif
+
/* Go to second-level dispatcher. Set up parameters to pass to the
* exception handler and call the exception handler.
*/
.global common_exception_return
common_exception_return:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l32i a4, a1, PT_DEPC
+ /* Double exception means we came here with an exception
+ * while PS.EXCM was set, i.e. interrupts disabled.
+ */
+ bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+ l32i a4, a1, PT_EXCCAUSE
+ bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+ /* We came here with an interrupt means interrupts were enabled
+ * and we'll reenable them on return.
+ */
+ movi a4, trace_hardirqs_on
+ callx4 a4
+1:
+#endif
+
/* Jump if we are returning from kernel exceptions. */
1: l32i a3, a1, PT_PS
_bbci.l a3, PS_UM_BIT, 4f
+ rsil a2, 0
+
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
* and have to restore WB and WS, extra states, and all registers
l32i a0, a1, PT_DEPC
l32i a3, a1, PT_AREG3
- _bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-
- wsr a0, depc
l32i a2, a1, PT_AREG2
- l32i a0, a1, PT_AREG0
- l32i a1, a1, PT_AREG1
- rfde
+ _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-1:
/* Restore a0...a3 and return */
- rsr a0, ps
- extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
- movi a0, 2f
- slli a2, a2, 4
- add a0, a2, a0
- l32i a2, a1, PT_AREG2
- jx a0
-
- .macro irq_exit_level level
- .align 16
- .if XCHAL_EXCM_LEVEL >= \level
- l32i a0, a1, PT_PC
- wsr a0, epc\level
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
- rfi \level
- .endif
- .endm
+ rfe
- .align 16
-2:
+1: wsr a0, depc
l32i a0, a1, PT_AREG0
l32i a1, a1, PT_AREG1
- rfe
-
- .align 16
- /* no rfi for level-1 irq, handled by rfe above*/
- nop
-
- irq_exit_level 2
- irq_exit_level 3
- irq_exit_level 4
- irq_exit_level 5
- irq_exit_level 6
+ rfde
ENDPROC(kernel_exception)
*/
__HEAD
+ .begin no-absolute-literals
+
ENTRY(_start)
- _j 2f
+ /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+ wsr a2, excsave1
+ _j _SetupMMU
+
+ .align 4
+ .literal_position
+.Lstartup:
+ .word _startup
+
.align 4
-1: .word _startup
-2: l32r a0, 1b
+ .global _SetupMMU
+_SetupMMU:
+ Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+ .end no-absolute-literals
+
+ l32r a0, .Lstartup
jx a0
ENDPROC(_start)
- .section .init.text, "ax"
+ __INIT
+ .literal_position
ENTRY(_startup)
movi a0, LOCKLEVEL
wsr a0, ps
- /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
-
- wsr a2, excsave1
-
/* Start with a fresh windowbase and windowstart. */
movi a1, 1
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
+#if XCHAL_NUM_IBREAK > 0
wsr a0, ibreakenable
+#endif
wsr a0, icount
movi a1, 15
wsr a0, icountlevel
isync
- initialize_mmu
-
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
___flush_dcache_all a2 a3
#endif
+ memw
+ isync
+ ___invalidate_icache_all a2 a3
+ isync
/* Setup stack and enable window exceptions (keep irqs disabled) */
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
_F(int, pcibios_fixup, (void), { return 0; });
+_F(void, pcibios_init, (void), { });
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
--- /dev/null
+/*
+ * arch/xtensa/kernel/stacktrace.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+
+void walk_stackframe(unsigned long *sp,
+ int (*fn)(struct stackframe *frame, void *data),
+ void *data)
+{
+ unsigned long a0, a1;
+ unsigned long sp_end;
+
+ a1 = (unsigned long)sp;
+ sp_end = ALIGN(a1, THREAD_SIZE);
+
+ spill_registers();
+
+ while (a1 < sp_end) {
+ struct stackframe frame;
+
+ sp = (unsigned long *)a1;
+
+ a0 = *(sp - 4);
+ a1 = *(sp - 3);
+
+ if (a1 <= (unsigned long)sp)
+ break;
+
+ frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.sp = a1;
+
+ if (fn(&frame, data))
+ return;
+ }
+}
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+ struct stack_trace_data *trace_data = data;
+ struct stack_trace *trace = trace_data->trace;
+
+ if (trace_data->skip) {
+ --trace_data->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+
+ trace->entries[trace->nr_entries++] = frame->pc;
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+ struct stack_trace_data trace_data = {
+ .trace = trace,
+ .skip = trace->skip,
+ };
+ walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+ unsigned long addr;
+ unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+ struct return_addr_data *r = data;
+
+ if (r->skip) {
+ --r->skip;
+ return 0;
+ }
+ if (!kernel_text_address(frame->pc))
+ return 0;
+ r->addr = frame->pc;
+ return 1;
+}
+
+unsigned long return_address(unsigned level)
+{
+ struct return_addr_data r = {
+ .skip = level + 1,
+ };
+ walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
*
* Essentially rewritten for the Xtensa architecture port.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
#include <linux/delay.h>
#include <linux/hardirq.h>
+#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
/*
* IRQ handler.
- * PS.INTLEVEL is the current IRQ priority level.
*/
extern void do_IRQ(int, struct pt_regs *);
XCHAL_INTLEVEL6_MASK,
XCHAL_INTLEVEL7_MASK,
};
- unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
-
- if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
- return;
for (;;) {
unsigned intread = get_sr(interrupt);
unsigned intenable = get_sr(intenable);
- unsigned int_at_level = intread & intenable &
- int_level_mask[level];
+ unsigned int_at_level = intread & intenable;
+ unsigned level;
+
+ for (level = LOCKLEVEL; level > 0; --level) {
+ if (int_at_level & int_level_mask[level]) {
+ int_at_level &= int_level_mask[level];
+ break;
+ }
+ }
- if (!int_at_level)
+ if (level == 0)
return;
/*
regs->syscall);
}
-static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+static int show_trace_cb(struct stackframe *frame, void *data)
{
- unsigned long *sp;
-
- if (!task || task == current)
- __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
- else
- sp = (unsigned long *)task->thread.sp;
-
- return sp;
+ if (kernel_text_address(frame->pc)) {
+ printk(" [<%08lx>] ", frame->pc);
+ print_symbol("%s\n", frame->pc);
+ }
+ return 0;
}
void show_trace(struct task_struct *task, unsigned long *sp)
{
- unsigned long a0, a1, pc;
- unsigned long sp_start, sp_end;
-
- if (sp)
- a1 = (unsigned long)sp;
- else
- a1 = (unsigned long)stack_pointer(task);
-
- sp_start = a1 & ~(THREAD_SIZE-1);
- sp_end = sp_start + THREAD_SIZE;
+ if (!sp)
+ sp = stack_pointer(task);
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
- spill_registers();
-
- while (a1 > sp_start && a1 < sp_end) {
- sp = (unsigned long*)a1;
-
- a0 = *(sp - 4);
- a1 = *(sp - 3);
-
- if (a1 <= (unsigned long) sp)
- break;
-
- pc = MAKE_PC_FROM_RA(a0, a1);
-
- if (kernel_text_address(pc)) {
- printk(" [<%08lx>] ", pc);
- print_symbol("%s\n", pc);
- }
- }
+ walk_stackframe(sp, show_trace_cb, NULL);
printk("\n");
}
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/vectors.h>
#define WINDOW_VECTORS_SIZE 0x180
xsr a0, depc # get DEPC, save a0
- movi a3, XCHAL_WINDOW_VECTORS_VADDR
+ movi a3, WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup
addi a3, a3, WINDOW_VECTORS_SIZE
_bgeu a0, a3, .Lfixup
.if XCHAL_EXCM_LEVEL >= \level
.section .Level\level\()InterruptVector.text, "ax"
ENTRY(_Level\level\()InterruptVector)
- wsr a0, epc1
+ wsr a0, excsave2
rsr a0, epc\level
- xsr a0, epc1
+ wsr a0, epc1
+ movi a0, EXCCAUSE_LEVEL1_INTERRUPT
+ wsr a0, exccause
+ rsr a0, eps\level
# branch to user or kernel vector
j _SimulateUserKernelVectorException
.endif
*/
.align 4
_SimulateUserKernelVectorException:
- wsr a0, excsave2
- movi a0, 4 # LEVEL1_INTERRUPT cause
- wsr a0, exccause
- rsr a0, ps
+ addi a0, a0, (1 << PS_EXCM_BIT)
+ wsr a0, ps
bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
rsr a0, excsave2 # restore a0
j _KernelExceptionVector # simulate kernel vector exception
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/vectors.h>
#include <variant/core.h>
#include <platform/hardware.h>
OUTPUT_ARCH(xtensa)
#endif
#ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0001000
+#define KERNELOFFSET 0xd0003000
#endif
/* Note: In the following macros, it would be nice to specify only the
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
- XCHAL_WINDOW_VECTORS_VADDR, 4,
+ WINDOW_VECTORS_VADDR, 4,
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
- XCHAL_DEBUG_VECTOR_VADDR - 4,
+ DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
- XCHAL_DEBUG_VECTOR_VADDR,
+ DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
#undef LAST
#if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text,
- XCHAL_INTLEVEL2_VECTOR_VADDR,
+ INTLEVEL2_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level2InterruptVector.text
#if XCHAL_EXCM_LEVEL >= 3
SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text,
- XCHAL_INTLEVEL3_VECTOR_VADDR,
+ INTLEVEL3_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level3InterruptVector.text
#if XCHAL_EXCM_LEVEL >= 4
SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text,
- XCHAL_INTLEVEL4_VECTOR_VADDR,
+ INTLEVEL4_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level4InterruptVector.text
#if XCHAL_EXCM_LEVEL >= 5
SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text,
- XCHAL_INTLEVEL5_VECTOR_VADDR,
+ INTLEVEL5_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level5InterruptVector.text
#if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text,
- XCHAL_INTLEVEL6_VECTOR_VADDR,
+ INTLEVEL6_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
- XCHAL_KERNEL_VECTOR_VADDR - 4,
+ KERNEL_VECTOR_VADDR - 4,
SIZEOF(LAST), LAST)
#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
- XCHAL_KERNEL_VECTOR_VADDR,
+ KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
- XCHAL_USER_VECTOR_VADDR - 4,
+ USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
- XCHAL_USER_VECTOR_VADDR,
+ USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+ DOUBLEEXC_VECTOR_VADDR - 16,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
- XCHAL_DOUBLEEXC_VECTOR_VADDR,
+ DOUBLEEXC_VECTOR_VADDR,
32,
.DoubleExceptionVector.literal)
. = ALIGN(0x10);
.bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ .ResetVector.text RESET_VECTOR_VADDR :
{
*(.ResetVector.text)
}
+
+ /*
+ * This is a remapped copy of the Secondary Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ *
+ * Only debug information about this section is put in the kernel image.
+ */
+ .SecondaryResetVector.remapped_text 0x46000000 (INFO):
+ {
+ *(.SecondaryResetVector.remapped_text)
+ }
+
+
.xt.lit : { *(.xt.lit) }
.xt.prop : { *(.xt.prop) }
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
+
+extern long common_exception_return;
+extern long _spill_registers;
+EXPORT_SYMBOL(common_exception_return);
+EXPORT_SYMBOL(_spill_registers);
*/
void __init init_mmu(void)
{
- /* Writing zeros to the <t>TLBCFG special registers ensure
- * that valid values exist in the register. For existing
- * PGSZID<w> fields, zero selects the first element of the
- * page-size array. For nonexistent PGSZID<w> fields, zero is
- * the best value to write. Also, when changing PGSZID<w>
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+ /*
+ * Writing zeros to the instruction and data TLBCFG special
+ * registers ensure that valid values exist in the register.
+ *
+ * For existing PGSZID<w> fields, zero selects the first element
+ * of the page-size array. For nonexistent PGSZID<w> fields,
+ * zero is the best value to write. Also, when changing PGSZID<w>
* fields, the corresponding TLB must be flushed.
*/
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
+#endif
flush_tlb_all();
/* Set rasid register to a known value. */
pc = MAKE_PC_FROM_RA(a0, pc);
/* Add the PC to the trace. */
- if (kernel_text_address(pc))
- oprofile_add_trace(pc);
-
+ oprofile_add_trace(pc);
if (pc == (unsigned long) &common_exception_return) {
regs = (struct pt_regs *)a1;
if (user_mode(regs)) {
static int rs_open(struct tty_struct *tty, struct file * filp)
{
tty->port = &serial_port;
- spin_lock(&timer_lock);
+ spin_lock_bh(&timer_lock);
if (tty->count == 1) {
setup_timer(&serial_timer, rs_poll,
(unsigned long)&serial_port);
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
- spin_unlock(&timer_lock);
+ spin_unlock_bh(&timer_lock);
return 0;
}
static void rs_poll(unsigned long priv)
{
struct tty_port *port = (struct tty_port *)priv;
- struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
int i = 0;
unsigned char c;
spin_lock(&timer_lock);
- while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
- __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
+ while (simc_poll(0)) {
+ simc_read(0, &c, 1);
tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
int len = strlen(s);
if (s != 0 && *s != 0)
- __simc (SYS_write, 1, (unsigned long)s,
- count < len ? count : len,0,0);
+ simc_write(1, s, count < len ? count : len);
}
static struct tty_driver* iss_console_device(struct console *c, int *index)
static int errno;
-static inline int __simc(int a, int b, int c, int d, int e, int f)
+static inline int __simc(int a, int b, int c, int d)
{
int ret;
register int a1 asm("a2") = a;
register int b1 asm("a3") = b;
register int c1 asm("a4") = c;
register int d1 asm("a5") = d;
- register int e1 asm("a6") = e;
- register int f1 asm("a7") = f;
__asm__ __volatile__ (
"simcall\n"
"mov %0, a2\n"
"mov %1, a3\n"
: "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
- : "r"(c1), "r"(d1), "r"(e1), "r"(f1)
+ : "r"(c1), "r"(d1)
: "memory");
return ret;
}
static inline int simc_open(const char *file, int flags, int mode)
{
- return __simc(SYS_open, (int) file, flags, mode, 0, 0);
+ return __simc(SYS_open, (int) file, flags, mode);
}
static inline int simc_close(int fd)
{
- return __simc(SYS_close, fd, 0, 0, 0, 0);
+ return __simc(SYS_close, fd, 0, 0);
}
static inline int simc_ioctl(int fd, int request, void *arg)
{
- return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
+ return __simc(SYS_ioctl, fd, request, (int) arg);
}
static inline int simc_read(int fd, void *buf, size_t count)
{
- return __simc(SYS_read, fd, (int) buf, count, 0, 0);
+ return __simc(SYS_read, fd, (int) buf, count);
}
static inline int simc_write(int fd, const void *buf, size_t count)
{
- return __simc(SYS_write, fd, (int) buf, count, 0, 0);
+ return __simc(SYS_write, fd, (int) buf, count);
}
static inline int simc_poll(int fd)
{
struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
- return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
- 0, 0);
+ return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv);
+}
+
+static inline int simc_lseek(int fd, uint32_t off, int whence)
+{
+ return __simc(SYS_lseek, fd, off, whence);
}
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
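
A short hypothetical sketch (not from the patch) of the narrowed 4-argument wrappers in use, following the same open/lseek/close pattern the simdisk hunks further down rely on; O_RDONLY and SEEK_END are assumed to come from the usual kernel headers.

 #include <linux/fcntl.h>
 #include <linux/fs.h>	/* SEEK_END */

 /* Return the size of a host-side file, or a negative value on failure. */
 static int host_file_size(const char *path)
 {
 	int fd = simc_open(path, O_RDONLY, 0);
 	int size;

 	if (fd < 0)
 		return fd;
 	size = simc_lseek(fd, 0, SEEK_END);	/* size in bytes */
 	simc_close(fd);
 	return size;
 }
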
}
-#ifdef CONFIG_PCI
-void platform_pcibios_init(void)
-{
-}
-#endif
-
void platform_halt(void)
{
pr_info(" ** Called platform_halt() **\n");
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
"wsr a2, ibreakenable\n\t"
+#endif
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, ps\n\t"
while (nbytes > 0) {
unsigned long io;
- __simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0);
+ simc_lseek(dev->fd, offset, SEEK_SET);
if (write)
io = simc_write(dev->fd, buffer, nbytes);
else
err = -ENODEV;
goto out;
}
- dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0);
+ dev->size = simc_lseek(dev->fd, 0, SEEK_END);
set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
dev->filename = filename;
pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
size_t size, loff_t *ppos)
{
struct simdisk *dev = PDE_DATA(file_inode(file));
- char *s = dev->filename;
+ const char *s = dev->filename;
if (s) {
ssize_t n = simple_read_from_buffer(buf, size, ppos,
s, strlen(s));
if (tmp == NULL)
return -ENOMEM;
- if (copy_from_user(tmp, buffer, count)) {
+ if (copy_from_user(tmp, buf, count)) {
err = -EFAULT;
goto out_free;
}
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
"wsr a2, ibreakenable\n\t"
+#endif
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, ps\n\t"
"wsr a2, icountlevel\n\t"
"movi a2, 0\n\t"
"wsr a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
"wsr a2, ibreakenable\n\t"
+#endif
"wsr a2, lcount\n\t"
"movi a2, 0x1f\n\t"
"wsr a2, ps\n\t"
if ((obj_desc->common_field.start_field_bit_offset == 0) &&
(obj_desc->common_field.bit_length == access_bit_width)) {
- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+ if (buffer_length >= sizeof(u64)) {
+ status =
+ acpi_ex_field_datum_io(obj_desc, 0, buffer,
+ ACPI_READ);
+ } else {
+ /* Use raw_datum (u64) to handle buffers < 64 bits */
+
+ status =
+ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
+ ACPI_READ);
+ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
+ }
+
return_ACPI_STATUS(status);
}
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+ ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
info->prefix_node = device_node;
info->pathname = METHOD_NAME__INI;
info->parameters = NULL;
return_value = 0;
status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
- return (status);
+ acpi_ut_remove_reference(return_desc);
+ return_ACPI_STATUS(status);
}
/* Lookup the interface in the global _OSI list */
#include <linux/serial_reg.h>
#include <linux/time.h>
-static const char *part_probes[] = { "bcm47xxpart", NULL };
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data bcma_pflash_data = {
.part_probe_types = part_probes,
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
+nvme-y := nvme-core.o nvme-scsi.o
swim_mod-y := swim.o swim_asm.o
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
-
+#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
-#define NVME_IO_TIMEOUT (5 * HZ)
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
-/*
- * Represents an NVM Express device. Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
- struct list_head node;
- struct nvme_queue **queues;
- u32 __iomem *dbs;
- struct pci_dev *pci_dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
- int instance;
- int queue_count;
- int db_stride;
- u32 ctrl_config;
- struct msix_entry *entry;
- struct nvme_bar __iomem *bar;
- struct list_head namespaces;
- char serial[20];
- char model[40];
- char firmware_rev[8];
- u32 max_hw_sectors;
-};
-
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
- struct list_head list;
-
- struct nvme_dev *dev;
- struct request_queue *queue;
- struct gendisk *disk;
-
- int ns_id;
- int lba_shift;
-};
-
/*
* An NVM Express queue. Each device has at least two (one for admin
* commands and one for I/O commands).
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
return ctx;
}
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
return dev->queues[get_cpu() + 1];
}
-static void put_nvmeq(struct nvme_queue *nvmeq)
+void put_nvmeq(struct nvme_queue *nvmeq)
{
put_cpu();
}
return 0;
}
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
- void *private; /* For the use of the submitter of the I/O */
- int npages; /* In the PRP list. 0 means small pool in use */
- int offset; /* Of PRP list */
- int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
- dma_addr_t first_dma;
- struct scatterlist sg[0];
-};
-
static __le64 **iod_list(struct nvme_iod *iod)
{
return ((void *)iod) + iod->offset;
return iod;
}
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
const int last_prp = PAGE_SIZE / 8 - 1;
int i;
kfree(iod);
}
-static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
-{
- struct nvme_queue *nvmeq = get_nvmeq(dev);
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- put_nvmeq(nvmeq);
- wake_up_process(nvme_thread);
-}
-
static void bio_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
{
dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_free_iod(dev, iod);
- if (status) {
+ if (status)
bio_endio(bio, -EIO);
- } else if (bio->bi_vcnt > bio->bi_idx) {
- requeue_bio(dev, bio);
- } else {
+ else
bio_endio(bio, 0);
- }
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev,
- struct nvme_common_command *cmd, struct nvme_iod *iod,
- int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+ struct nvme_iod *iod, int total_len, gfp_t gfp)
{
struct dma_pool *pool;
int length = total_len;
return total_len;
}
+struct nvme_bio_pair {
+ struct bio b1, b2, *parent;
+ struct bio_vec *bv1, *bv2;
+ int err;
+ atomic_t cnt;
+};
+
+static void nvme_bio_pair_endio(struct bio *bio, int err)
+{
+ struct nvme_bio_pair *bp = bio->bi_private;
+
+ if (err)
+ bp->err = err;
+
+ if (atomic_dec_and_test(&bp->cnt)) {
+ bio_endio(bp->parent, bp->err);
+ if (bp->bv1)
+ kfree(bp->bv1);
+ if (bp->bv2)
+ kfree(bp->bv2);
+ kfree(bp);
+ }
+}
+
+static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
+ int len, int offset)
+{
+ struct nvme_bio_pair *bp;
+
+ BUG_ON(len > bio->bi_size);
+ BUG_ON(idx > bio->bi_vcnt);
+
+ bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
+ if (!bp)
+ return NULL;
+ bp->err = 0;
+
+ bp->b1 = *bio;
+ bp->b2 = *bio;
+
+ bp->b1.bi_size = len;
+ bp->b2.bi_size -= len;
+ bp->b1.bi_vcnt = idx;
+ bp->b2.bi_idx = idx;
+ bp->b2.bi_sector += len >> 9;
+
+ if (offset) {
+ bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+ GFP_ATOMIC);
+ if (!bp->bv1)
+ goto split_fail_1;
+
+ bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+ GFP_ATOMIC);
+ if (!bp->bv2)
+ goto split_fail_2;
+
+ memcpy(bp->bv1, bio->bi_io_vec,
+ bio->bi_max_vecs * sizeof(struct bio_vec));
+ memcpy(bp->bv2, bio->bi_io_vec,
+ bio->bi_max_vecs * sizeof(struct bio_vec));
+
+ bp->b1.bi_io_vec = bp->bv1;
+ bp->b2.bi_io_vec = bp->bv2;
+ bp->b2.bi_io_vec[idx].bv_offset += offset;
+ bp->b2.bi_io_vec[idx].bv_len -= offset;
+ bp->b1.bi_io_vec[idx].bv_len = offset;
+ bp->b1.bi_vcnt++;
+ } else
+ bp->bv1 = bp->bv2 = NULL;
+
+ bp->b1.bi_private = bp;
+ bp->b2.bi_private = bp;
+
+ bp->b1.bi_end_io = nvme_bio_pair_endio;
+ bp->b2.bi_end_io = nvme_bio_pair_endio;
+
+ bp->parent = bio;
+ atomic_set(&bp->cnt, 2);
+
+ return bp;
+
+ split_fail_2:
+ kfree(bp->bv1);
+ split_fail_1:
+ kfree(bp);
+ return NULL;
+}
+
+static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
+ int idx, int len, int offset)
+{
+ struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
+ if (!bp)
+ return -ENOMEM;
+
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, &bp->b1);
+ bio_list_add(&nvmeq->sq_cong, &bp->b2);
+
+ return 0;
+}
+
/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
-static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
struct bio_vec *bvec, *bvprv = NULL;
struct scatterlist *sg = NULL;
- int i, old_idx, length = 0, nsegs = 0;
+ int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+
+ if (nvmeq->dev->stripe_size)
+ split_len = nvmeq->dev->stripe_size -
+ ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
sg_init_table(iod->sg, psegs);
- old_idx = bio->bi_idx;
bio_for_each_segment(bvec, bio, i) {
if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
sg->length += bvec->bv_len;
} else {
if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- break;
+ return nvme_split_and_submit(bio, nvmeq, i,
+ length, 0);
+
sg = sg ? sg + 1 : iod->sg;
sg_set_page(sg, bvec->bv_page, bvec->bv_len,
bvec->bv_offset);
nsegs++;
}
+
+ if (split_len - length < bvec->bv_len)
+ return nvme_split_and_submit(bio, nvmeq, i, split_len,
+ split_len - length);
length += bvec->bv_len;
bvprv = bvec;
}
- bio->bi_idx = i;
iod->nents = nsegs;
sg_mark_end(sg);
- if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
- bio->bi_idx = old_idx;
+ if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
return -ENOMEM;
- }
+
+ BUG_ON(length != bio->bi_size);
return length;
}
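The stripe split above depends on dev->stripe_size being a power of two (it is set as 1 << (ctrl->vs[3] + shift) in nvme_dev_add() further down). A minimal standalone sketch of the same boundary arithmetic, with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values: 128 KiB stripe, bio starting at sector 0x230. */
        uint64_t stripe_size = 128 * 1024;
        uint64_t byte_off = (uint64_t)0x230 << 9;   /* bi_sector is in 512-byte units */
        uint64_t split_len = stripe_size - (byte_off & (stripe_size - 1));

        /* A bio longer than split_len would cross the stripe boundary, which is
         * why nvme_map_bio() hands it to nvme_split_and_submit() at that point. */
        printf("%llu bytes until the next stripe boundary\n",
               (unsigned long long)split_len);
        return 0;
    }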
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+ struct nvme_dsm_range *range;
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+ &iod->first_dma);
+ if (!range)
+ return -ENOMEM;
+
+ iod_list(iod)[0] = (__le64 *)range;
+ iod->npages = 0;
+
+ range->cattr = cpu_to_le32(0);
+ range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->dsm.opcode = nvme_cmd_dsm;
+ cmnd->dsm.command_id = cmdid;
+ cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+ cmnd->dsm.nr = 0;
+ cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
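For a concrete picture of the range built above (values hypothetical; the namespace is assumed to use 512-byte LBAs, so nvme_block_nr() leaves the sector number unchanged), a 1 MiB discard bio starting at sector 0x1000 turns into a single deallocate range:

    range->cattr = cpu_to_le32(0);
    range->nlb   = cpu_to_le32(2048);     /* 1 MiB >> lba_shift (9) */
    range->slba  = cpu_to_le64(0x1000);

with cmnd->dsm.nr = 0 (the field is 0's based, so this means one range) and NVME_DSMGMT_AD set so the controller is allowed to deallocate those blocks.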
static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid)
{
return 0;
}
-static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
special_completion, NVME_IO_TIMEOUT);
if (unlikely(cmdid < 0))
goto free_iod;
+ if (bio->bi_rw & REQ_DISCARD) {
+ result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+ if (result)
+ goto free_cmdid;
+ return result;
+ }
if ((bio->bi_rw & REQ_FLUSH) && !psegs)
return nvme_submit_flush(nvmeq, ns, cmdid);
dma_dir = DMA_FROM_DEVICE;
}
- result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
- if (result < 0)
+ result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
+ if (result <= 0)
goto free_cmdid;
length = result;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
- bio->bi_sector += length >> 9;
-
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
-static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
- struct nvme_command *cmd, u32 *result, unsigned timeout)
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ u32 *result, unsigned timeout)
{
int cmdid;
struct sync_cmd_info cmdinfo;
set_current_state(TASK_KILLABLE);
nvme_submit_cmd(nvmeq, cmd);
- schedule();
+ schedule_timeout(timeout);
if (cmdinfo.status == -EINTR) {
nvme_abort_command(nvmeq, cmdid);
return cmdinfo.status;
}
-static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
dma_addr_t dma_addr)
{
struct nvme_command c;
return nvme_submit_admin_cmd(dev, &c, NULL);
}
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
return nvme_submit_admin_cmd(dev, &c, result);
}
-static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
- unsigned dword11, dma_addr_t dma_addr, u32 *result)
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+ dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
void *ctx;
nvme_completion_fn fn;
static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
};
if (timeout && !time_after(now, info[cmdid].timeout))
return nvmeq;
free_cqdma:
- dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
kfree(nvmeq);
return ERR_PTR(result);
}
+static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
+{
+ unsigned long timeout;
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+ while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit. The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+ u32 cc = readl(&dev->bar->cc);
+
+ if (cc & NVME_CC_ENABLE)
+ writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
+ return nvme_wait_ready(dev, cap, false);
+}
+
+static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+ return nvme_wait_ready(dev, cap, true);
+}
+
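A note on the arithmetic in nvme_wait_ready(): CAP.TO is specified in 500 ms units, so the wait limit works out to (TO + 1) * 500 ms; a controller reporting CAP.TO = 7, for example, gets (7 + 1) * HZ / 2 jiffies, i.e. 4 seconds, before -ENODEV is returned.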
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
- int result = 0;
+ int result;
u32 aqa;
- u64 cap;
- unsigned long timeout;
+ u64 cap = readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ result = nvme_disable_ctrl(dev, cap);
+ if (result < 0)
+ return result;
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
if (!nvmeq)
dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
- writel(0, &dev->bar->cc);
writel(aqa, &dev->bar->aqa);
writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
writel(dev->ctrl_config, &dev->bar->cc);
- cap = readq(&dev->bar->cap);
- timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
- dev->db_stride = NVME_CAP_STRIDE(cap);
-
- while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
- msleep(100);
- if (fatal_signal_pending(current))
- result = -EINTR;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->pci_dev->dev,
- "Device not ready; aborting initialisation\n");
- result = -ENODEV;
- }
- }
-
- if (result) {
- nvme_free_queue_mem(nvmeq);
- return result;
- }
+ result = nvme_enable_ctrl(dev, cap);
+ if (result)
+ goto free_q;
result = queue_request_irq(dev, nvmeq, "nvme admin");
+ if (result)
+ goto free_q;
+
dev->queues[0] = nvmeq;
return result;
+
+ free_q:
+ nvme_free_queue_mem(nvmeq);
+ return result;
}
-static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
unsigned long addr, unsigned length)
{
int i, err, count, nents, offset;
return ERR_PTR(err);
}
-static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
struct nvme_iod *iod)
{
int i;
struct nvme_queue *nvmeq;
struct nvme_user_io io;
struct nvme_command c;
- unsigned length;
- int status;
- struct nvme_iod *iod;
+ unsigned length, meta_len;
+ int status, i;
+ struct nvme_iod *iod, *meta_iod = NULL;
+ dma_addr_t meta_dma_addr;
+ void *meta, *uninitialized_var(meta_mem);
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
length = (io.nblocks + 1) << ns->lba_shift;
+ meta_len = (io.nblocks + 1) * ns->ms;
+
+ if (meta_len && ((io.metadata & 3) || !io.metadata))
+ return -EINVAL;
switch (io.opcode) {
case nvme_cmd_write:
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
- c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
- c.rw.reftag = io.reftag;
- c.rw.apptag = io.apptag;
- c.rw.appmask = io.appmask;
- /* XXX: metadata */
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag);
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+
+ if (meta_len) {
+ meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+ if (IS_ERR(meta_iod)) {
+ status = PTR_ERR(meta_iod);
+ meta_iod = NULL;
+ goto unmap;
+ }
+
+ meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+ &meta_dma_addr, GFP_KERNEL);
+ if (!meta_mem) {
+ status = -ENOMEM;
+ goto unmap;
+ }
+
+ if (io.opcode & 1) {
+ int meta_offset = 0;
+
+ for (i = 0; i < meta_iod->nents; i++) {
+ meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+ meta_iod->sg[i].offset;
+ memcpy(meta_mem + meta_offset, meta,
+ meta_iod->sg[i].length);
+ kunmap_atomic(meta);
+ meta_offset += meta_iod->sg[i].length;
+ }
+ }
+
+ c.rw.metadata = cpu_to_le64(meta_dma_addr);
+ }
+
length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
nvmeq = get_nvmeq(dev);
else
status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ if (meta_len) {
+ if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
+ int meta_offset = 0;
+
+ for (i = 0; i < meta_iod->nents; i++) {
+ meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+ meta_iod->sg[i].offset;
+ memcpy(meta, meta_mem + meta_offset,
+ meta_iod->sg[i].length);
+ kunmap_atomic(meta);
+ meta_offset += meta_iod->sg[i].length;
+ }
+ }
+
+ dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
+ meta_dma_addr);
+ }
+
+ unmap:
nvme_unmap_user_pages(dev, io.opcode & 1, iod);
nvme_free_iod(dev, iod);
+
+ if (meta_iod) {
+ nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
+ nvme_free_iod(dev, meta_iod);
+ }
+
return status;
}
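A minimal userspace sketch of the new metadata path through nvme_submit_io(), assuming the nvme_user_io definition and NVME_IOCTL_SUBMIT_IO from the driver's <linux/nvme.h>, and a namespace with 512-byte LBAs and 8 bytes of separate metadata per block (all hypothetical); the buffer size and dword alignment follow the checks at the top of the function:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nvme.h>

    int main(void)
    {
        unsigned count = 8, ms = 8;     /* hypothetical: 8 LBAs, 8 metadata bytes each */
        struct nvme_user_io io;
        void *data, *meta;
        int fd = open("/dev/nvme0n1", O_RDWR);

        if (fd < 0 || posix_memalign(&data, 4096, count * 512) ||
            posix_memalign(&meta, 4, count * ms))   /* dword aligned or -EINVAL */
            return 1;

        memset(&io, 0, sizeof(io));
        io.opcode = 0x02;               /* nvme_cmd_read */
        io.nblocks = count - 1;         /* 0's based */
        io.slba = 0;
        io.addr = (unsigned long)data;
        io.metadata = (unsigned long)meta;  /* copied back here after a read */

        if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io))
            perror("NVME_IOCTL_SUBMIT_IO");

        close(fd);
        return 0;
    }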
struct nvme_command c;
int status, length;
struct nvme_iod *uninitialized_var(iod);
+ unsigned timeout;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
GFP_KERNEL);
}
+ timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
+ ADMIN_TIMEOUT;
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
+ status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
+ timeout);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg);
+ case SG_GET_VERSION_NUM:
+ return nvme_sg_get_version_num((void __user *)arg);
+ case SG_IO:
+ return nvme_sg_io(ns, (void __user *)arg);
default:
return -ENOTTY;
}
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
bio_list_add_head(&nvmeq->sq_cong, bio);
break;
}
- if (bio_list_empty(&nvmeq->sq_cong))
- remove_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
}
}
struct nvme_dev *dev;
while (!kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
list_for_each_entry(dev, &dev_list, node) {
int i;
}
}
spin_unlock(&dev_list_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ schedule_timeout(round_jiffies_relative(HZ));
}
return 0;
}
spin_unlock(&dev_list_lock);
}
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+ u32 logical_block_size = queue_logical_block_size(ns->queue);
+ ns->queue->limits.discard_zeroes_data = 0;
+ ns->queue->limits.discard_alignment = logical_block_size;
+ ns->queue->limits.discard_granularity = logical_block_size;
+ ns->queue->limits.max_discard_sectors = 0xffffffff;
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
blk_queue_make_request(ns->queue, nvme_make_request);
ns->dev = dev;
ns->queue->queuedata = ns;
ns->disk = disk;
lbaf = id->flbas & 0xf;
ns->lba_shift = id->lbaf[lbaf].ds;
+ ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
if (dev->max_hw_sectors)
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+ if (dev->oncs & NVME_CTRL_ONCS_DSM)
+ nvme_config_discard(ns);
+
return ns;
out_free_queue:
nvme_free_queue(dev, i);
}
+/*
+ * Return: error value if an error occurred setting up the queues or calling
+ * Identify Device. 0 if these succeeded, even if adding some of the
+ * namespaces failed. At the moment, these failures are silent. TBD which
+ * failures should be reported.
+ */
static int nvme_dev_add(struct nvme_dev *dev)
{
int res, nn, i;
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
struct nvme_id_ctrl *ctrl;
struct nvme_id_ns *id_ns;
void *mem;
dma_addr_t dma_addr;
+ int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
res = nvme_setup_io_queues(dev);
if (res)
mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
res = nvme_identify(dev, 0, 1, dma_addr);
if (res) {
res = -EIO;
- goto out_free;
+ goto out;
}
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
+ dev->oncs = le16_to_cpup(&ctrl->oncs);
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
- if (ctrl->mdts) {
- int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+ if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- }
+ if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
for (i = 1; i <= nn; i++) {
}
list_for_each_entry(ns, &dev->namespaces, list)
add_disk(ns->disk);
-
- goto out;
-
- out_free:
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- nvme_ns_free(ns);
- }
+ res = 0;
out:
dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
spin_unlock(&dev_list_lock);
}
+static void nvme_free_dev(struct kref *kref)
+{
+ struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+ nvme_dev_remove(dev);
+ pci_disable_msix(dev->pci_dev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(dev->pci_dev);
+ pci_release_regions(dev->pci_dev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+static int nvme_dev_open(struct inode *inode, struct file *f)
+{
+ struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
+ miscdev);
+ kref_get(&dev->kref);
+ f->private_data = dev;
+ return 0;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *f)
+{
+ struct nvme_dev *dev = f->private_data;
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct nvme_dev *dev = f->private_data;
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(dev, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations nvme_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_dev_open,
+ .release = nvme_dev_release,
+ .unlocked_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = nvme_dev_ioctl,
+};
+
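And a similar sketch for the new per-controller character node: the misc device registered in nvme_probe() below is typically exposed as /dev/nvme0 and accepts NVME_IOCTL_ADMIN_CMD without reference to any namespace. This assumes the nvme_admin_cmd layout from the driver's <linux/nvme.h>; the Identify opcode and CNS value come from the NVMe spec, not from this patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nvme.h>

    int main(void)
    {
        struct nvme_admin_cmd cmd;
        void *buf;
        int fd = open("/dev/nvme0", O_RDWR);

        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
            return 1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x06;          /* Identify */
        cmd.addr = (unsigned long)buf;
        cmd.data_len = 4096;
        cmd.cdw10 = 1;              /* CNS = 1: controller data structure */
        cmd.timeout_ms = 1000;      /* honoured via the per-command timeout above */

        if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd))
            perror("NVME_IOCTL_ADMIN_CMD");
        else
            printf("model: %.40s\n", (char *)buf + 24);  /* MN at bytes 24-63 */

        close(fd);
        free(buf);
        return 0;
    }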
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int bars, result = -ENOMEM;
if (result)
goto delete;
+ scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
+ dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ dev->miscdev.parent = &pdev->dev;
+ dev->miscdev.name = dev->name;
+ dev->miscdev.fops = &nvme_dev_fops;
+ result = misc_register(&dev->miscdev);
+ if (result)
+ goto remove;
+
+ kref_init(&dev->kref);
return 0;
+ remove:
+ nvme_dev_remove(dev);
delete:
spin_lock(&dev_list_lock);
list_del(&dev->node);
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_remove(dev);
- pci_disable_msix(pdev);
- iounmap(dev->bar);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- kfree(dev->queues);
- kfree(dev->entry);
- kfree(dev);
+ misc_deregister(&dev->miscdev);
+ kref_put(&dev->kref, nvme_free_dev);
}
/* These functions are yet to be implemented */
--- /dev/null
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Refer to the SCSI-NVMe Translation spec for details on how
+ * each command is translated.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <scsi/sg.h>
+#include <scsi/scsi.h>
+
+
+static int sg_version_num = 30534; /* 2 digits for each component */
+
+#define SNTI_TRANSLATION_SUCCESS 0
+#define SNTI_INTERNAL_ERROR 1
+
+/* VPD Page Codes */
+#define VPD_SUPPORTED_PAGES 0x00
+#define VPD_SERIAL_NUMBER 0x80
+#define VPD_DEVICE_IDENTIFIERS 0x83
+#define VPD_EXTENDED_INQUIRY 0x86
+#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
+
+/* CDB offsets */
+#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6
+#define REPORT_LUNS_SR_OFFSET 2
+#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10
+#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4
+#define REQUEST_SENSE_DESC_OFFSET 1
+#define REQUEST_SENSE_DESC_MASK 0x01
+#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1
+#define INQUIRY_EVPD_BYTE_OFFSET 1
+#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2
+#define INQUIRY_EVPD_BIT_MASK 1
+#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3
+#define START_STOP_UNIT_CDB_IMMED_OFFSET 1
+#define START_STOP_UNIT_CDB_IMMED_MASK 0x1
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF
+#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4
+#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0
+#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4
+#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4
+#define START_STOP_UNIT_CDB_START_OFFSET 4
+#define START_STOP_UNIT_CDB_START_MASK 0x1
+#define WRITE_BUFFER_CDB_MODE_OFFSET 1
+#define WRITE_BUFFER_CDB_MODE_MASK 0x1F
+#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2
+#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3
+#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6
+#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1
+#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20
+#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1
+#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10
+#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4
+#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8
+#define FORMAT_UNIT_PROT_INT_OFFSET 3
+#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0
+#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07
+#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7
+
+/* Misc. defines */
+#define NIBBLE_SHIFT 4
+#define FIXED_SENSE_DATA 0x70
+#define DESC_FORMAT_SENSE_DATA 0x72
+#define FIXED_SENSE_DATA_ADD_LENGTH 10
+#define LUN_ENTRY_SIZE 8
+#define LUN_DATA_HEADER_SIZE 8
+#define ALL_LUNS_RETURNED 0x02
+#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
+#define RESTRICTED_LUNS_RETURNED 0x00
+#define NVME_POWER_STATE_START_VALID 0x00
+#define NVME_POWER_STATE_ACTIVE 0x01
+#define NVME_POWER_STATE_IDLE 0x02
+#define NVME_POWER_STATE_STANDBY 0x03
+#define NVME_POWER_STATE_LU_CONTROL 0x07
+#define POWER_STATE_0 0
+#define POWER_STATE_1 1
+#define POWER_STATE_2 2
+#define POWER_STATE_3 3
+#define DOWNLOAD_SAVE_ACTIVATE 0x05
+#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
+#define ACTIVATE_DEFERRED_MICROCODE 0x0F
+#define FORMAT_UNIT_IMMED_MASK 0x2
+#define FORMAT_UNIT_IMMED_OFFSET 1
+#define KELVIN_TEMP_FACTOR 273
+#define FIXED_FMT_SENSE_DATA_SIZE 18
+#define DESC_FMT_SENSE_DATA_SIZE 8
+
+/* SCSI/NVMe defines and bit masks */
+#define INQ_STANDARD_INQUIRY_PAGE 0x00
+#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00
+#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
+#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
+#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
+#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
+#define INQ_SERIAL_NUMBER_LENGTH 0x14
+#define INQ_NUM_SUPPORTED_VPD_PAGES 5
+#define VERSION_SPC_4 0x06
+#define ACA_UNSUPPORTED 0
+#define STANDARD_INQUIRY_LENGTH 36
+#define ADDITIONAL_STD_INQ_LENGTH 31
+#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C
+#define RESERVED_FIELD 0
+
+/* SCSI READ/WRITE Defines */
+#define IO_CDB_WP_MASK 0xE0
+#define IO_CDB_WP_SHIFT 5
+#define IO_CDB_FUA_MASK 0x8
+#define IO_6_CDB_LBA_OFFSET 0
+#define IO_6_CDB_LBA_MASK 0x001FFFFF
+#define IO_6_CDB_TX_LEN_OFFSET 4
+#define IO_6_DEFAULT_TX_LEN 256
+#define IO_10_CDB_LBA_OFFSET 2
+#define IO_10_CDB_TX_LEN_OFFSET 7
+#define IO_10_CDB_WP_OFFSET 1
+#define IO_10_CDB_FUA_OFFSET 1
+#define IO_12_CDB_LBA_OFFSET 2
+#define IO_12_CDB_TX_LEN_OFFSET 6
+#define IO_12_CDB_WP_OFFSET 1
+#define IO_12_CDB_FUA_OFFSET 1
+#define IO_16_CDB_FUA_OFFSET 1
+#define IO_16_CDB_WP_OFFSET 1
+#define IO_16_CDB_LBA_OFFSET 2
+#define IO_16_CDB_TX_LEN_OFFSET 10
+
+/* Mode Sense/Select defines */
+#define MODE_PAGE_INFO_EXCEP 0x1C
+#define MODE_PAGE_CACHING 0x08
+#define MODE_PAGE_CONTROL 0x0A
+#define MODE_PAGE_POWER_CONDITION 0x1A
+#define MODE_PAGE_RETURN_ALL 0x3F
+#define MODE_PAGE_BLK_DES_LEN 0x08
+#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10
+#define MODE_PAGE_CACHING_LEN 0x14
+#define MODE_PAGE_CONTROL_LEN 0x0C
+#define MODE_PAGE_POW_CND_LEN 0x28
+#define MODE_PAGE_INF_EXC_LEN 0x0C
+#define MODE_PAGE_ALL_LEN 0x54
+#define MODE_SENSE6_MPH_SIZE 4
+#define MODE_SENSE6_ALLOC_LEN_OFFSET 4
+#define MODE_SENSE_PAGE_CONTROL_OFFSET 2
+#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0
+#define MODE_SENSE_PAGE_CODE_OFFSET 2
+#define MODE_SENSE_PAGE_CODE_MASK 0x3F
+#define MODE_SENSE_LLBAA_OFFSET 1
+#define MODE_SENSE_LLBAA_MASK 0x10
+#define MODE_SENSE_LLBAA_SHIFT 4
+#define MODE_SENSE_DBD_OFFSET 1
+#define MODE_SENSE_DBD_MASK 8
+#define MODE_SENSE_DBD_SHIFT 3
+#define MODE_SENSE10_MPH_SIZE 8
+#define MODE_SENSE10_ALLOC_LEN_OFFSET 7
+#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1
+#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1
+#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4
+#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7
+#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10
+#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1
+#define MODE_SELECT_6_BD_OFFSET 3
+#define MODE_SELECT_10_BD_OFFSET 6
+#define MODE_SELECT_10_LLBAA_OFFSET 4
+#define MODE_SELECT_10_LLBAA_MASK 1
+#define MODE_SELECT_6_MPH_SIZE 4
+#define MODE_SELECT_10_MPH_SIZE 8
+#define CACHING_MODE_PAGE_WCE_MASK 0x04
+#define MODE_SENSE_BLK_DESC_ENABLED 0
+#define MODE_SENSE_BLK_DESC_COUNT 1
+#define MODE_SELECT_PAGE_CODE_MASK 0x3F
+#define SHORT_DESC_BLOCK 8
+#define LONG_DESC_BLOCK 16
+#define MODE_PAGE_POW_CND_LEN_FIELD 0x26
+#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A
+#define MODE_PAGE_CACHING_LEN_FIELD 0x12
+#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A
+#define MODE_SENSE_PC_CURRENT_VALUES 0
+
+/* Log Sense defines */
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07
+#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F
+#define LOG_PAGE_TEMPERATURE_PAGE 0x0D
+#define LOG_SENSE_CDB_SP_OFFSET 1
+#define LOG_SENSE_CDB_SP_NOT_ENABLED 0
+#define LOG_SENSE_CDB_PC_OFFSET 2
+#define LOG_SENSE_CDB_PC_MASK 0xC0
+#define LOG_SENSE_CDB_PC_SHIFT 6
+#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1
+#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F
+#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7
+#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8
+#define LOG_INFO_EXCP_PAGE_LENGTH 0xC
+#define REMAINING_TEMP_PAGE_LENGTH 0xC
+#define LOG_TEMP_PAGE_LENGTH 0x10
+#define LOG_TEMP_UNKNOWN 0xFF
+#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3
+
+/* Read Capacity defines */
+#define READ_CAP_10_RESP_SIZE 8
+#define READ_CAP_16_RESP_SIZE 32
+
+/* NVMe Namespace and Command Defines */
+#define NVME_GET_SMART_LOG_PAGE 0x02
+#define NVME_GET_FEAT_TEMP_THRESH 0x04
+#define BYTES_TO_DWORDS 4
+#define NVME_MAX_FIRMWARE_SLOT 7
+
+/* Report LUNs defines */
+#define REPORT_LUNS_FIRST_LUN_OFFSET 8
+
+/* SCSI ADDITIONAL SENSE Codes */
+
+#define SCSI_ASC_NO_SENSE 0x00
+#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03
+#define SCSI_ASC_LUN_NOT_READY 0x04
+#define SCSI_ASC_WARNING 0x0B
+#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10
+#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10
+#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10
+#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11
+#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D
+#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20
+#define SCSI_ASC_ILLEGAL_COMMAND 0x20
+#define SCSI_ASC_ILLEGAL_BLOCK 0x21
+#define SCSI_ASC_INVALID_CDB 0x24
+#define SCSI_ASC_INVALID_LUN 0x25
+#define SCSI_ASC_INVALID_PARAMETER 0x26
+#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31
+#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44
+
+/* SCSI ADDITIONAL SENSE Code Qualifiers */
+
+#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00
+#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01
+#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01
+#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02
+#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03
+#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
+#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08
+#define SCSI_ASCQ_INVALID_LUN_ID 0x09
+
+/**
+ * DEVICE_SPECIFIC_PARAMETER in mode parameter header (see sbc2r16) to
+ * enable DPOFUA support type 0x10 value.
+ */
+#define DEVICE_SPECIFIC_PARAMETER 0
+#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
+
+/* MACROs to extract information from CDBs */
+
+#define GET_OPCODE(cdb) cdb[0]
+
+#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)
+
+#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))
+
+#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
+(cdb[index + 1] << 8) | \
+(cdb[index + 2] << 0))
+
+#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
+(cdb[index + 1] << 16) | \
+(cdb[index + 2] << 8) | \
+(cdb[index + 3] << 0))
+
+#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
+(((u64)cdb[index + 1]) << 48) | \
+(((u64)cdb[index + 2]) << 40) | \
+(((u64)cdb[index + 3]) << 32) | \
+(((u64)cdb[index + 4]) << 24) | \
+(((u64)cdb[index + 5]) << 16) | \
+(((u64)cdb[index + 6]) << 8) | \
+(((u64)cdb[index + 7]) << 0))
+
+/* Inquiry Helper Macros */
+#define GET_INQ_EVPD_BIT(cdb) \
+((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
+INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
+
+#define GET_INQ_PAGE_CODE(cdb) \
+(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))
+
+#define GET_INQ_ALLOC_LENGTH(cdb) \
+(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))
+
+/* Report LUNs Helper Macros */
+#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
+(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Read Capacity Helper Macros */
+#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
+(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
+
+#define IS_READ_CAP_16(cdb) \
+((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
+
+/* Request Sense Helper Macros */
+#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
+(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Mode Sense Helper Macros */
+#define GET_MODE_SENSE_DBD(cdb) \
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
+MODE_SENSE_DBD_SHIFT)
+
+#define GET_MODE_SENSE_LLBAA(cdb) \
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
+MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)
+
+#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
+(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
+
+
+/* Struct to gather data that needs to be extracted from a SCSI CDB.
+ Not conforming to any particular CDB variant, but compatible with all. */
+
+struct nvme_trans_io_cdb {
+ u8 fua;
+ u8 prot_info;
+ u64 lba;
+ u32 xfer_len;
+};
+
+
+/* Internal Helper Functions */
+
+
+/* Copy data to userspace memory */
+
+static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
+ unsigned long n)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ unsigned long not_copied;
+ int i;
+ void *index = from;
+ size_t remaining = n;
+ size_t xfer_len;
+
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ xfer_len = min(remaining, sgl.iov_len);
+ not_copied = copy_to_user(sgl.iov_base, index,
+ xfer_len);
+ if (not_copied) {
+ res = -EFAULT;
+ break;
+ }
+ index += xfer_len;
+ remaining -= xfer_len;
+ if (remaining == 0)
+ break;
+ }
+ return res;
+ }
+ not_copied = copy_to_user(hdr->dxferp, from, n);
+ if (not_copied)
+ res = -EFAULT;
+ return res;
+}
+
+/* Copy data from userspace memory */
+
+static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
+ unsigned long n)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ unsigned long not_copied;
+ int i;
+ void *index = to;
+ size_t remaining = n;
+ size_t xfer_len;
+
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ xfer_len = min(remaining, sgl.iov_len);
+ not_copied = copy_from_user(index, sgl.iov_base,
+ xfer_len);
+ if (not_copied) {
+ res = -EFAULT;
+ break;
+ }
+ index += xfer_len;
+ remaining -= xfer_len;
+ if (remaining == 0)
+ break;
+ }
+ return res;
+ }
+
+ not_copied = copy_from_user(to, hdr->dxferp, n);
+ if (not_copied)
+ res = -EFAULT;
+ return res;
+}
+
+/* Status/Sense Buffer Writeback */
+
+static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
+ u8 asc, u8 ascq)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 xfer_len;
+ u8 resp[DESC_FMT_SENSE_DATA_SIZE];
+
+ if (scsi_status_is_good(status)) {
+ hdr->status = SAM_STAT_GOOD;
+ hdr->masked_status = GOOD;
+ hdr->host_status = DID_OK;
+ hdr->driver_status = DRIVER_OK;
+ hdr->sb_len_wr = 0;
+ } else {
+ hdr->status = status;
+ hdr->masked_status = status >> 1;
+ hdr->host_status = DID_OK;
+ hdr->driver_status = DRIVER_OK;
+
+ memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
+ resp[0] = DESC_FORMAT_SENSE_DATA;
+ resp[1] = sense_key;
+ resp[2] = asc;
+ resp[3] = ascq;
+
+ xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
+ hdr->sb_len_wr = xfer_len;
+ if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
+ res = -EFAULT;
+ }
+
+ return res;
+}
+
+static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
+{
+ u8 status, sense_key, asc, ascq;
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ /* For non-nvme (Linux) errors, simply return the error code */
+ if (nvme_sc < 0)
+ return nvme_sc;
+
+ /* Mask DNR, More, and reserved fields */
+ nvme_sc &= 0x7FF;
+
+ switch (nvme_sc) {
+ /* Generic Command Status */
+ case NVME_SC_SUCCESS:
+ status = SAM_STAT_GOOD;
+ sense_key = NO_SENSE;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_OPCODE:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ILLEGAL_COMMAND;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_FIELD:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_INVALID_CDB;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_DATA_XFER_ERROR:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_POWER_LOSS:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_WARNING;
+ ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
+ break;
+ case NVME_SC_INTERNAL:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = HARDWARE_ERROR;
+ asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ABORT_REQ:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ABORT_QUEUE:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_FUSED_FAIL:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_FUSED_MISSING:
+ status = SAM_STAT_TASK_ABORTED;
+ sense_key = ABORTED_COMMAND;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_INVALID_NS:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+ ascq = SCSI_ASCQ_INVALID_LUN_ID;
+ break;
+ case NVME_SC_LBA_RANGE:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ILLEGAL_BLOCK;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_CAP_EXCEEDED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_NS_NOT_READY:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = NOT_READY;
+ asc = SCSI_ASC_LUN_NOT_READY;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+
+ /* Command Specific Status */
+ case NVME_SC_INVALID_FORMAT:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
+ ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
+ break;
+ case NVME_SC_BAD_ATTRIBUTES:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_INVALID_CDB;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+
+ /* Media Errors */
+ case NVME_SC_WRITE_FAULT:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_READ_ERROR:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_GUARD_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case NVME_SC_APPTAG_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
+ break;
+ case NVME_SC_REFTAG_CHECK:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MEDIUM_ERROR;
+ asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
+ ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
+ break;
+ case NVME_SC_COMPARE_FAILED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = MISCOMPARE;
+ asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case NVME_SC_ACCESS_DENIED:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+ ascq = SCSI_ASCQ_INVALID_LUN_ID;
+ break;
+
+ /* Unspecified/Default */
+ case NVME_SC_CMDID_CONFLICT:
+ case NVME_SC_CMD_SEQ_ERROR:
+ case NVME_SC_CQ_INVALID:
+ case NVME_SC_QID_INVALID:
+ case NVME_SC_QUEUE_SIZE:
+ case NVME_SC_ABORT_LIMIT:
+ case NVME_SC_ABORT_MISSING:
+ case NVME_SC_ASYNC_LIMIT:
+ case NVME_SC_FIRMWARE_SLOT:
+ case NVME_SC_FIRMWARE_IMAGE:
+ case NVME_SC_INVALID_VECTOR:
+ case NVME_SC_INVALID_LOG_PAGE:
+ default:
+ status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ILLEGAL_REQUEST;
+ asc = SCSI_ASC_NO_SENSE;
+ ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ }
+
+ res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
+
+ return res;
+}
+
+/* INQUIRY Helper Functions */
+
+static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ int xfer_len;
+ u8 resp_data_format = 0x02;
+ u8 protect;
+ u8 cmdque = 0x01 << 1;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme ns identify - use DPS value for PROTECT field */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ /*
+ * If nvme_sc was -ve, res will be -ve here.
+ * If nvme_sc was +ve, the status would have been translated, and res
+ * can only be 0 or -ve.
+ * - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc
+ * - If -ve, return because it's a Linux error.
+ */
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ns = mem;
+ (id_ns->dps) ? (protect = 0x01) : (protect = 0);
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[2] = VERSION_SPC_4;
+ inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */
+ inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
+ inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
+ inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
+ strncpy(&inq_response[8], "NVMe ", 8);
+ strncpy(&inq_response[16], dev->model, 16);
+ strncpy(&inq_response[32], dev->firmware_rev, 4);
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ return res;
+}
+
+static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */
+ inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */
+ inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
+ inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
+ inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
+ inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
+ inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ return res;
+}
+
+static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *inq_response,
+ int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
+ inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
+ strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ return res;
+}
+
+static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *inq_response, int alloc_len)
+{
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u8 ieee[4];
+ int xfer_len;
+ __be32 tmp_id = cpu_to_be32(ns->ns_id);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme controller identify */
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ctrl = mem;
+
+ /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
+ ieee[0] = id_ctrl->ieee[0] << 4;
+ ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
+ ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
+ ieee[3] = id_ctrl->ieee[2] >> 4;
+
+ memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+ inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
+ inq_response[3] = 20; /* Page Length */
+ /* Designation Descriptor start */
+ inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
+ inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */
+ inq_response[6] = 0x00; /* Rsvd */
+ inq_response[7] = 16; /* Designator Length */
+ /* Designator start */
+ inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
+ inq_response[9] = ieee[2]; /* IEEE ID */
+ inq_response[10] = ieee[1]; /* IEEE ID */
+ inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
+ inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
+ inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
+ inq_response[14] = dev->serial[0];
+ inq_response[15] = dev->serial[1];
+ inq_response[16] = dev->model[0];
+ inq_response[17] = dev->model[1];
+ memcpy(&inq_response[18], &tmp_id, sizeof(u32));
+ /* Last 2 bytes are zero */
+
+ xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ return res;
+}
+
+static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ u8 *inq_response;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ struct nvme_id_ns *id_ns;
+ int xfer_len;
+ u8 microcode = 0x80;
+ u8 spt;
+ u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
+ u8 grd_chk, app_chk, ref_chk, protect;
+ u8 uask_sup = 0x20;
+ u8 v_sup;
+ u8 luiclr = 0x01;
+
+ inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ns = mem;
+ spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
+ (id_ns->dps) ? (protect = 0x01) : (protect = 0);
+ grd_chk = protect << 2;
+ app_chk = protect << 1;
+ ref_chk = protect;
+
+ /* nvme controller identify */
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_free;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_free;
+ }
+ id_ctrl = mem;
+ v_sup = id_ctrl->vwc;
+
+ memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
+ inq_response[2] = 0x00; /* Page Length MSB */
+ inq_response[3] = 0x3C; /* Page Length LSB */
+ inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
+ inq_response[5] = uask_sup;
+ inq_response[6] = v_sup;
+ inq_response[7] = luiclr;
+ inq_response[8] = 0;
+ inq_response[9] = 0;
+
+ xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out_dma:
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ u8 *inq_response;
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+
+ inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
+ inq_response[2] = 0x00; /* Page Length MSB */
+ inq_response[3] = 0x3C; /* Page Length LSB */
+ inq_response[4] = 0x00; /* Medium Rotation Rate MSB */
+ inq_response[5] = 0x01; /* Medium Rotation Rate LSB */
+ inq_response[6] = 0x00; /* Form Factor */
+
+ xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+/* LOG SENSE Helper Functions */
+
+static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+
+ log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+
+ log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
+ log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+ log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+ log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
+
+ xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+ struct nvme_command c;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_smart_log *smart_log;
+ dma_addr_t dma_addr;
+ void *mem;
+ u8 temp_c;
+ u16 temp_k;
+
+ log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_smart_log),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Get SMART Log Page */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+ c.common.prp1 = cpu_to_le64(dma_addr);
+ c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ res = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (res != NVME_SC_SUCCESS) {
+ temp_c = LOG_TEMP_UNKNOWN;
+ } else {
+ smart_log = mem;
+ temp_k = (smart_log->temperature[1] << 8) +
+ (smart_log->temperature[0]);
+ temp_c = temp_k - KELVIN_TEMP_FACTOR;
+ }
+
+ log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
+ /* Informational Exceptions Log Parameter 1 Start */
+ /* Parameter Code=0x0000 bytes 4,5 */
+ log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
+ log_response[7] = 0x04; /* PARAMETER LENGTH */
+ /* Add sense Code and qualifier = 0x00 each */
+ /* Use Temperature from NVMe Get Log Page, convert to C from K */
+ log_response[10] = temp_c;
+
+ xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ mem, dma_addr);
+ out_dma:
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ int alloc_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *log_response;
+ struct nvme_command c;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_smart_log *smart_log;
+ dma_addr_t dma_addr;
+ void *mem;
+ u32 feature_resp;
+ u8 temp_c_cur, temp_c_thresh;
+ u16 temp_k;
+
+ log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
+ if (log_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_smart_log),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+
+ /* Get SMART Log Page */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+ c.common.prp1 = cpu_to_le64(dma_addr);
+ c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
+ BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
+ res = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (res != NVME_SC_SUCCESS) {
+ temp_c_cur = LOG_TEMP_UNKNOWN;
+ } else {
+ smart_log = mem;
+ temp_k = (smart_log->temperature[1] << 8) +
+ (smart_log->temperature[0]);
+ temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
+ }
+
+ /* Get Features for Temp Threshold */
+ res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
+ &feature_resp);
+ if (res != NVME_SC_SUCCESS)
+ temp_c_thresh = LOG_TEMP_UNKNOWN;
+ else
+ temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
+
+ log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
+ /* Subpage=0x00, Page Length MSB=0 */
+ log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
+ /* Temperature Log Parameter 1 (Temperature) Start */
+ /* Parameter Code = 0x0000 */
+ log_response[6] = 0x01; /* Format and Linking = 01b */
+ log_response[7] = 0x02; /* Parameter Length */
+ /* Use Temperature from NVMe Get Log Page, convert to C from K */
+ log_response[9] = temp_c_cur;
+ /* Temperature Log Parameter 2 (Reference Temperature) Start */
+ log_response[11] = 0x01; /* Parameter Code = 0x0001 */
+ log_response[12] = 0x01; /* Format and Linking = 01b */
+ log_response[13] = 0x02; /* Parameter Length */
+ /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
+ log_response[15] = temp_c_thresh;
+
+ xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
+ res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+ mem, dma_addr);
+ out_dma:
+ kfree(log_response);
+ out_mem:
+ return res;
+}
+
+/* MODE SENSE Helper Functions */
+
+static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
+ u16 mode_data_length, u16 blk_desc_len)
+{
+ /* Quick check to make sure I don't stomp on my own memory... */
+ if ((cdb10 && len < 8) || (!cdb10 && len < 4))
+ return SNTI_INTERNAL_ERROR;
+
+ if (cdb10) {
+ resp[0] = (mode_data_length & 0xFF00) >> 8;
+ resp[1] = (mode_data_length & 0x00FF);
+ /* resp[2] and [3] are zero */
+ resp[4] = llbaa;
+ resp[5] = RESERVED_FIELD;
+ resp[6] = (blk_desc_len & 0xFF00) >> 8;
+ resp[7] = (blk_desc_len & 0x00FF);
+ } else {
+ resp[0] = (mode_data_length & 0x00FF);
+ /* resp[1] and [2] are zero */
+ resp[3] = (blk_desc_len & 0x00FF);
+ }
+
+ return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *resp, int len, u8 llbaa)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 flbas;
+ u32 lba_length;
+
+ if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
+ return SNTI_INTERNAL_ERROR;
+ else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
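+	/* The low nibble of FLBAS selects the LBA format in use;
+	 * LBADS gives the block size as a power-of-two exponent. */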
+ flbas = (id_ns->flbas) & 0x0F;
+ lba_length = (1 << (id_ns->lbaf[flbas].ds));
+
+ if (llbaa == 0) {
+ __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
+ /* Byte 4 is reserved */
+ __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
+
+ memcpy(resp, &tmp_cap, sizeof(u32));
+ memcpy(&resp[4], &tmp_len, sizeof(u32));
+ } else {
+ __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
+ __be32 tmp_len = cpu_to_be32(lba_length);
+
+ memcpy(resp, &tmp_cap, sizeof(u64));
+ /* Bytes 8, 9, 10, 11 are reserved */
+ memcpy(&resp[12], &tmp_len, sizeof(u32));
+ }
+
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_fill_control_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ if (len < MODE_PAGE_CONTROL_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_CONTROL;
+ resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
+ resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1,
+ * D_SENSE=1, GLTSD=1, RLEC=0 */
+ resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
+ /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */
+ resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
+ /* resp[6] and [7] are obsolete, thus zero */
+ resp[8] = 0xFF; /* Busy timeout period = 0xffff */
+ resp[9] = 0xFF;
+ /* Bytes 10,11: Extended selftest completion time = 0x0000 */
+
+ return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *resp, int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ u32 feature_resp;
+ u8 vwc;
+
+ if (len < MODE_PAGE_CACHING_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
+ &feature_resp);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out;
+ }
+ vwc = feature_resp & 0x00000001;
+
+ resp[0] = MODE_PAGE_CACHING;
+ resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
+ resp[2] = vwc << 2;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ if (len < MODE_PAGE_POW_CND_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_POWER_CONDITION;
+ resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
+ /* All other bytes are zero */
+
+ return res;
+}
+
+static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *resp,
+ int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+
+ if (len < MODE_PAGE_INF_EXC_LEN)
+ return SNTI_INTERNAL_ERROR;
+
+ resp[0] = MODE_PAGE_INFO_EXCEP;
+ resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
+ resp[2] = 0x88;
+ /* All other bytes are zero */
+
+ return res;
+}
+
+static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *resp, int len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 mode_pages_offset_1 = 0;
+ u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
+
+ mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
+ mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
+ mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
+
+ res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
+ MODE_PAGE_CACHING_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
+ MODE_PAGE_CONTROL_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
+ MODE_PAGE_POW_CND_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
+ MODE_PAGE_INF_EXC_LEN);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ out:
+ return res;
+}
+
+static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
+{
+ if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
+ /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
+ return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
+ } else {
+ return 0;
+ }
+}
+
+static int nvme_trans_mode_page_create(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *cmd,
+ u16 alloc_len, u8 cdb10,
+ int (*mode_page_fill_func)
+ (struct nvme_ns *,
+ struct sg_io_hdr *hdr, u8 *, int),
+ u16 mode_pages_tot_len)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int xfer_len;
+ u8 *response;
+ u8 dbd, llbaa;
+ u16 resp_size;
+ int mph_size;
+ u16 mode_pages_offset_1;
+ u16 blk_desc_len, blk_desc_offset, mode_data_length;
+
+ dbd = GET_MODE_SENSE_DBD(cmd);
+ llbaa = GET_MODE_SENSE_LLBAA(cmd);
+ mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
+ blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
+
+ resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
+ /* Refer spc4r34 Table 440 for calculation of Mode data Length field */
+ mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
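+	/* (3 = 4-byte header minus the 1-byte length field for 6-byte CDBs;
+	 *  10-byte CDBs use an 8-byte header with a 2-byte length field) */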
+
+ blk_desc_offset = mph_size;
+ mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+ memset(response, 0, resp_size);
+
+ res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
+ llbaa, mode_data_length, blk_desc_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+ if (blk_desc_len > 0) {
+ res = nvme_trans_fill_blk_desc(ns, hdr,
+ &response[blk_desc_offset],
+ blk_desc_len, llbaa);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+ }
+ res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
+ mode_pages_tot_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_free;
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ out_free:
+ kfree(response);
+ out_mem:
+ return res;
+}
+
+/* Read Capacity Helper Functions */
+
+static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
+ u8 cdb16)
+{
+ u8 flbas;
+ u32 lba_length;
+ u64 rlba;
+ u8 prot_en;
+ u8 p_type_lut[4] = {0, 0, 1, 2};
+ __be64 tmp_rlba;
+ __be32 tmp_rlba_32;
+ __be32 tmp_len;
+
+ flbas = (id_ns->flbas) & 0x0F;
+ lba_length = (1 << (id_ns->lbaf[flbas].ds));
+ rlba = le64_to_cpup(&id_ns->nsze) - 1;
+	prot_en = id_ns->dps ? 0x01 : 0;
+
+ if (!cdb16) {
+ if (rlba > 0xFFFFFFFF)
+ rlba = 0xFFFFFFFF;
+ tmp_rlba_32 = cpu_to_be32(rlba);
+ tmp_len = cpu_to_be32(lba_length);
+ memcpy(response, &tmp_rlba_32, sizeof(u32));
+ memcpy(&response[4], &tmp_len, sizeof(u32));
+ } else {
+ tmp_rlba = cpu_to_be64(rlba);
+ tmp_len = cpu_to_be32(lba_length);
+ memcpy(response, &tmp_rlba, sizeof(u64));
+ memcpy(&response[8], &tmp_len, sizeof(u32));
+ response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
+ /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
+ /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
+ /* Bytes 16-31 - Reserved */
+ }
+}
+
+/* Start Stop Unit Helper Functions */
+
+static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 pc, u8 pcmod, u8 start)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ int lowest_pow_st; /* max npss = lowest power consumption */
+ unsigned ps_desired = 0;
+
+ /* NVMe Controller Identify */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ctrl),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ctrl = mem;
+ lowest_pow_st = id_ctrl->npss - 1;
+
+ switch (pc) {
+ case NVME_POWER_STATE_START_VALID:
+ /* Action unspecified if POWER CONDITION MODIFIER != 0 */
+ if (pcmod == 0 && start == 0x1)
+ ps_desired = POWER_STATE_0;
+ if (pcmod == 0 && start == 0x0)
+ ps_desired = lowest_pow_st;
+ break;
+ case NVME_POWER_STATE_ACTIVE:
+ /* Action unspecified if POWER CONDITION MODIFIER != 0 */
+ if (pcmod == 0)
+ ps_desired = POWER_STATE_0;
+ break;
+ case NVME_POWER_STATE_IDLE:
+ /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
+ /* min of desired state and (lps-1) because lps is STOP */
+ if (pcmod == 0x0)
+ ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
+ else if (pcmod == 0x1)
+ ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
+ else if (pcmod == 0x2)
+ ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
+ break;
+ case NVME_POWER_STATE_STANDBY:
+ /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
+ if (pcmod == 0x0)
+ ps_desired = max(0, (lowest_pow_st - 2));
+ else if (pcmod == 0x1)
+ ps_desired = max(0, (lowest_pow_st - 1));
+ break;
+ case NVME_POWER_STATE_LU_CONTROL:
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
+ NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc)
+ res = nvme_sc;
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+/* Write Buffer Helper Functions */
+/* Also used for Format Unit with hdr passed as NULL and buffer_id = 0 */
+
+static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 opcode, u32 tot_len, u32 offset,
+ u8 buffer_id)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_command c;
+ struct nvme_iod *iod = NULL;
+ unsigned length;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = opcode;
+ if (opcode == nvme_admin_download_fw) {
+ if (hdr->iovec_count > 0) {
+ /* Assuming SGL is not allowed for this command */
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
+ (unsigned long)hdr->dxferp, tot_len);
+ if (IS_ERR(iod)) {
+ res = PTR_ERR(iod);
+ goto out;
+ }
+ length = nvme_setup_prps(dev, &c.common, iod, tot_len,
+ GFP_KERNEL);
+ if (length != tot_len) {
+ res = -ENOMEM;
+ goto out_unmap;
+ }
+
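+		/* NUMD and OFST are expressed in dwords; NUMD is 0's based */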
+ c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
+ c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
+ } else if (opcode == nvme_admin_activate_fw) {
+ u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
+ c.common.cdw10[0] = cpu_to_le32(cdw10);
+ }
+
+ nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_unmap;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out_unmap:
+ if (opcode == nvme_admin_download_fw) {
+ nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
+ nvme_free_iod(dev, iod);
+ }
+ out:
+ return res;
+}
+
+/* Mode Select Helper Functions */
+
+static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
+ u16 *bd_len, u8 *llbaa)
+{
+ if (cdb10) {
+ /* 10 Byte CDB */
+ *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
+ parm_list[MODE_SELECT_10_BD_OFFSET + 1];
+		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
+				MODE_SELECT_10_LLBAA_MASK;
+ } else {
+ /* 6 Byte CDB */
+ *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
+ }
+}
+
+static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
+ u16 idx, u16 bd_len, u8 llbaa)
+{
+ u16 bd_num;
+
+ bd_num = bd_len / ((llbaa == 0) ?
+ SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
+ /* Store block descriptor info if a FORMAT UNIT comes later */
+ /* TODO Saving 1st BD info; what to do if multiple BD received? */
+ if (llbaa == 0) {
+ /* Standard Block Descriptor - spc4r34 7.5.5.1 */
+ ns->mode_select_num_blocks =
+ (parm_list[idx + 1] << 16) +
+ (parm_list[idx + 2] << 8) +
+ (parm_list[idx + 3]);
+
+ ns->mode_select_block_len =
+ (parm_list[idx + 5] << 16) +
+ (parm_list[idx + 6] << 8) +
+ (parm_list[idx + 7]);
+ } else {
+ /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
+ ns->mode_select_num_blocks =
+ (((u64)parm_list[idx + 0]) << 56) +
+ (((u64)parm_list[idx + 1]) << 48) +
+ (((u64)parm_list[idx + 2]) << 40) +
+ (((u64)parm_list[idx + 3]) << 32) +
+ (((u64)parm_list[idx + 4]) << 24) +
+ (((u64)parm_list[idx + 5]) << 16) +
+ (((u64)parm_list[idx + 6]) << 8) +
+ ((u64)parm_list[idx + 7]);
+
+ ns->mode_select_block_len =
+ (parm_list[idx + 12] << 24) +
+ (parm_list[idx + 13] << 16) +
+ (parm_list[idx + 14] << 8) +
+ (parm_list[idx + 15]);
+ }
+}
+
+static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *mode_page, u8 page_code)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ unsigned dword11;
+
+ switch (page_code) {
+ case MODE_PAGE_CACHING:
+ dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
+ nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
+ 0, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ break;
+ if (nvme_sc) {
+ res = nvme_sc;
+ break;
+ }
+ break;
+ case MODE_PAGE_CONTROL:
+ break;
+ case MODE_PAGE_POWER_CONDITION:
+ /* Verify the OS is not trying to set timers */
+ if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ if (!res)
+ res = SNTI_INTERNAL_ERROR;
+ break;
+ }
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ if (!res)
+ res = SNTI_INTERNAL_ERROR;
+ break;
+ }
+
+ return res;
+}
+
+static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd, u16 parm_list_len, u8 pf,
+ u8 sp, u8 cdb10)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 *parm_list;
+ u16 bd_len;
+ u8 llbaa = 0;
+ u16 index, saved_index;
+ u8 page_code;
+ u16 mp_size;
+
+ /* Get parm list from data-in/out buffer */
+ parm_list = kmalloc(parm_list_len, GFP_KERNEL);
+ if (parm_list == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_mem;
+
+ nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
+ index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
+
+ if (bd_len != 0) {
+ /* Block Descriptors present, parse */
+ nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
+ index += bd_len;
+ }
+ saved_index = index;
+
+ /* Multiple mode pages may be present; iterate through all */
+ /* In 1st Iteration, don't do NVME Command, only check for CDB errors */
+ do {
+ page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
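+		/* Page length byte excludes the two-byte mode page header */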
+ mp_size = parm_list[index + 1] + 2;
+ if ((page_code != MODE_PAGE_CACHING) &&
+ (page_code != MODE_PAGE_CONTROL) &&
+ (page_code != MODE_PAGE_POWER_CONDITION)) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+ index += mp_size;
+ } while (index < parm_list_len);
+
+ /* In 2nd Iteration, do the NVME Commands */
+ index = saved_index;
+ do {
+ page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+ mp_size = parm_list[index + 1] + 2;
+ res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
+ page_code);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ break;
+ index += mp_size;
+ } while (index < parm_list_len);
+
+ out_mem:
+ kfree(parm_list);
+ out:
+ return res;
+}
+
+/* Format Unit Helper Functions */
+
+static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 flbas;
+
+ /*
+ * SCSI Expects a MODE SELECT would have been issued prior to
+ * a FORMAT UNIT, and the block size and number would be used
+ * from the block descriptor in it. If a MODE SELECT had not
+ * been issued, FORMAT shall use the current values for both.
+ */
+
+ if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+
+ if (ns->mode_select_num_blocks == 0)
+ ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
+ if (ns->mode_select_block_len == 0) {
+ flbas = (id_ns->flbas) & 0x0F;
+ ns->mode_select_block_len =
+ (1 << (id_ns->lbaf[flbas].ds));
+ }
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ mem, dma_addr);
+ }
+ out:
+ return res;
+}
+
+static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
+ u8 format_prot_info, u8 *nvme_pf_code)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 *parm_list;
+ u8 pf_usage, pf_code;
+
+ parm_list = kmalloc(len, GFP_KERNEL);
+ if (parm_list == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ res = nvme_trans_copy_from_user(hdr, parm_list, len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out_mem;
+
+ if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
+ FORMAT_UNIT_IMMED_MASK) != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+
+ if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
+ (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_mem;
+ }
+ pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
+ FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
+ pf_code = (pf_usage << 2) | format_prot_info;
+ switch (pf_code) {
+ case 0:
+ *nvme_pf_code = 0;
+ break;
+ case 2:
+ *nvme_pf_code = 1;
+ break;
+ case 3:
+ *nvme_pf_code = 2;
+ break;
+ case 7:
+ *nvme_pf_code = 3;
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out_mem:
+ kfree(parm_list);
+ out:
+ return res;
+}
+
+static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 prot_info)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 i;
+ u8 flbas, nlbaf;
+ u8 selected_lbaf = 0xFF;
+ u32 cdw10 = 0;
+ struct nvme_command c;
+
+ /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+ flbas = (id_ns->flbas) & 0x0F;
+ nlbaf = id_ns->nlbaf;
+
+ for (i = 0; i < nlbaf; i++) {
+ if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
+ selected_lbaf = i;
+ break;
+ }
+ }
+ if (selected_lbaf > 0x0F) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+ if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+
+ cdw10 |= prot_info << 5;
+ cdw10 |= selected_lbaf & 0x0F;
+ memset(&c, 0, sizeof(c));
+ c.format.opcode = nvme_admin_format_nvm;
+ c.format.nsid = cpu_to_le32(ns->ns_id);
+ c.format.cdw10 = cpu_to_le32(cdw10);
+
+ nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+/* Read/Write Helper Functions */
+
+static inline void nvme_trans_get_io_cdb6(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = 0;
+ cdb_info->prot_info = 0;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
+ IO_6_CDB_LBA_MASK;
+ cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
+
+ /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
+ if (cdb_info->xfer_len == 0)
+ cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
+}
+
+static inline void nvme_trans_get_io_cdb10(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
+					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb12(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
+					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb16(u8 *cmd,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
+ IO_CDB_FUA_MASK;
+	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
+					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+ cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
+ cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
+}
+
+static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
+ struct nvme_trans_io_cdb *cdb_info,
+ u32 max_blocks)
+{
+ /* If using iovecs, send one nvme command per vector */
+ if (hdr->iovec_count > 0)
+ return hdr->iovec_count;
+ else if (cdb_info->xfer_len > max_blocks)
+ return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
+ else
+ return 1;
+}
+
+static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
+ struct nvme_trans_io_cdb *cdb_info)
+{
+ u16 control = 0;
+
+ /* When Protection information support is added, implement here */
+
+ if (cdb_info->fua > 0)
+ control |= NVME_RW_FUA;
+
+ return control;
+}
+
+static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ struct nvme_trans_io_cdb *cdb_info, u8 is_write)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ u32 num_cmds;
+ struct nvme_iod *iod;
+ u64 unit_len;
+ u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */
+ u32 retcode;
+ u32 i = 0;
+ u64 nvme_offset = 0;
+ void __user *next_mapping_addr;
+ struct nvme_command c;
+ u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
+ u16 control;
+ u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+
+ num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
+
+ /*
+ * This loop handles two cases.
+ * First, when an SGL is used in the form of an iovec list:
+ * - Use iov_base as the next mapping address for the nvme command_id
+ * - Use iov_len as the data transfer length for the command.
+ * Second, when we have a single buffer
+ * - If larger than max_blocks, split into chunks, offset
+ * each nvme command accordingly.
+ */
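+	/*
+	 * For example, a single-buffer transfer of 150 blocks with
+	 * max_blocks = 64 is split into three commands of 64, 64 and
+	 * 22 blocks at increasing LBA offsets.
+	 */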
+ for (i = 0; i < num_cmds; i++) {
+ memset(&c, 0, sizeof(c));
+ if (hdr->iovec_count > 0) {
+ struct sg_iovec sgl;
+
+ retcode = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (retcode)
+ return -EFAULT;
+ unit_len = sgl.iov_len;
+ unit_num_blocks = unit_len >> ns->lba_shift;
+ next_mapping_addr = sgl.iov_base;
+ } else {
+ unit_num_blocks = min((u64)max_blocks,
+ (cdb_info->xfer_len - nvme_offset));
+ unit_len = unit_num_blocks << ns->lba_shift;
+ next_mapping_addr = hdr->dxferp +
+ ((1 << ns->lba_shift) * nvme_offset);
+ }
+
+ c.rw.opcode = opcode;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
+ c.rw.length = cpu_to_le16(unit_num_blocks - 1);
+ control = nvme_trans_io_get_control(ns, cdb_info);
+ c.rw.control = cpu_to_le16(control);
+
+ iod = nvme_map_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ (unsigned long)next_mapping_addr, unit_len);
+ if (IS_ERR(iod)) {
+ res = PTR_ERR(iod);
+ goto out;
+ }
+ retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
+ GFP_KERNEL);
+ if (retcode != unit_len) {
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ res = -ENOMEM;
+ goto out;
+ }
+
+ nvme_offset += unit_num_blocks;
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep
+ * preemption disabled. We may be preempted at any
+ * point, and be rescheduled to a different CPU. That
+ * will cause cacheline bouncing, but no additional
+ * races since q_lock already protects against other
+ * CPUs.
+ */
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
+ NVME_IO_TIMEOUT);
+ if (nvme_sc != NVME_SC_SUCCESS) {
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ goto out;
+ }
+ nvme_unmap_user_pages(dev,
+ (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ iod);
+ nvme_free_iod(dev, iod);
+ }
+ res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
+
+ out:
+ return res;
+}
+
+
+/* SCSI Command Translation Functions */
+
+static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ struct nvme_trans_io_cdb cdb_info;
+ u8 opcode = cmd[0];
+ u64 xfer_bytes;
+ u64 sum_iov_len = 0;
+ struct sg_iovec sgl;
+ int i;
+ size_t not_copied;
+
+ /* Extract Fields from CDB */
+ switch (opcode) {
+ case WRITE_6:
+ case READ_6:
+ nvme_trans_get_io_cdb6(cmd, &cdb_info);
+ break;
+ case WRITE_10:
+ case READ_10:
+ nvme_trans_get_io_cdb10(cmd, &cdb_info);
+ break;
+ case WRITE_12:
+ case READ_12:
+ nvme_trans_get_io_cdb12(cmd, &cdb_info);
+ break;
+ case WRITE_16:
+ case READ_16:
+ nvme_trans_get_io_cdb16(cmd, &cdb_info);
+ break;
+ default:
+ /* Will never really reach here */
+ res = SNTI_INTERNAL_ERROR;
+ goto out;
+ }
+
+ /* Calculate total length of transfer (in bytes) */
+ if (hdr->iovec_count > 0) {
+ for (i = 0; i < hdr->iovec_count; i++) {
+ not_copied = copy_from_user(&sgl, hdr->dxferp +
+ i * sizeof(struct sg_iovec),
+ sizeof(struct sg_iovec));
+ if (not_copied)
+ return -EFAULT;
+ sum_iov_len += sgl.iov_len;
+ /* IO vector sizes should be multiples of block size */
+ if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_PARAMETER,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ }
+ } else {
+ sum_iov_len = hdr->dxfer_len;
+ }
+
+ /* As Per sg ioctl howto, if the lengths differ, use the lower one */
+ xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
+
+	/* If block count and actual data buffer size don't match, error out */
+ if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ /* Check for 0 length transfer - it is not illegal */
+ if (cdb_info.xfer_len == 0)
+ goto out;
+
+ /* Send NVMe IO Command(s) */
+ res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 evpd;
+ u8 page_code;
+ int alloc_len;
+ u8 *inq_response;
+
+ evpd = GET_INQ_EVPD_BIT(cmd);
+ page_code = GET_INQ_PAGE_CODE(cmd);
+ alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
+
+ inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+ if (inq_response == NULL) {
+ res = -ENOMEM;
+ goto out_mem;
+ }
+
+ if (evpd == 0) {
+ if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
+ res = nvme_trans_standard_inquiry_page(ns, hdr,
+ inq_response, alloc_len);
+ } else {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ }
+ } else {
+ switch (page_code) {
+ case VPD_SUPPORTED_PAGES:
+ res = nvme_trans_supported_vpd_pages(ns, hdr,
+ inq_response, alloc_len);
+ break;
+ case VPD_SERIAL_NUMBER:
+ res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
+ case VPD_DEVICE_IDENTIFIERS:
+ res = nvme_trans_device_id_page(ns, hdr, inq_response,
+ alloc_len);
+ break;
+ case VPD_EXTENDED_INQUIRY:
+ res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
+ break;
+ case VPD_BLOCK_DEV_CHARACTERISTICS:
+ res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
+ break;
+ default:
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST,
+ SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ }
+ kfree(inq_response);
+ out_mem:
+ return res;
+}
+
+static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 alloc_len;
+ u8 sp;
+ u8 pc;
+ u8 page_code;
+
+ sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
+ if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
+ page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
+ pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
+ if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
+ switch (page_code) {
+ case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
+ res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
+ break;
+ case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
+ res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
+ break;
+ case LOG_PAGE_TEMPERATURE_PAGE:
+ res = nvme_trans_log_temperature(ns, hdr, alloc_len);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 cdb10 = 0;
+ u16 parm_list_len;
+ u8 page_format;
+ u8 save_pages;
+
+ page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
+ page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
+
+ save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
+ save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
+
+ if (GET_OPCODE(cmd) == MODE_SELECT) {
+ parm_list_len = GET_U8_FROM_CDB(cmd,
+ MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
+ } else {
+ parm_list_len = GET_U16_FROM_CDB(cmd,
+ MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
+ cdb10 = 1;
+ }
+
+ if (parm_list_len != 0) {
+ /*
+		 * According to SPC-4 r24, a parameter list length field of 0
+ * shall not be considered an error
+ */
+ res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
+ page_format, save_pages, cdb10);
+ }
+
+ return res;
+}
+
+static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u16 alloc_len;
+ u8 cdb10 = 0;
+ u8 page_code;
+ u8 pc;
+
+ if (GET_OPCODE(cmd) == MODE_SENSE) {
+ alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
+ } else {
+ alloc_len = GET_U16_FROM_CDB(cmd,
+ MODE_SENSE10_ALLOC_LEN_OFFSET);
+ cdb10 = 1;
+ }
+
+ pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
+ MODE_SENSE_PAGE_CONTROL_MASK;
+ if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+
+ page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
+ MODE_SENSE_PAGE_CODE_MASK;
+ switch (page_code) {
+ case MODE_PAGE_CACHING:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_caching_page,
+ MODE_PAGE_CACHING_LEN);
+ break;
+ case MODE_PAGE_CONTROL:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_control_page,
+ MODE_PAGE_CONTROL_LEN);
+ break;
+ case MODE_PAGE_POWER_CONDITION:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_pow_cnd_page,
+ MODE_PAGE_POW_CND_LEN);
+ break;
+ case MODE_PAGE_INFO_EXCEP:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_inf_exc_page,
+ MODE_PAGE_INF_EXC_LEN);
+ break;
+ case MODE_PAGE_RETURN_ALL:
+ res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+ cdb10,
+ &nvme_trans_fill_all_pages,
+ MODE_PAGE_ALL_LEN);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u32 alloc_len = READ_CAP_10_RESP_SIZE;
+ u32 resp_size = READ_CAP_10_RESP_SIZE;
+ u32 xfer_len;
+ u8 cdb16;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ns *id_ns;
+ u8 *response;
+
+ cdb16 = IS_READ_CAP_16(cmd);
+ if (cdb16) {
+ alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
+ resp_size = READ_CAP_16_RESP_SIZE;
+ }
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ /* nvme ns identify */
+ nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ns = mem;
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+ memset(response, 0, resp_size);
+ nvme_trans_fill_read_cap(response, id_ns, cdb16);
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ u32 alloc_len, xfer_len, resp_size;
+ u8 select_report;
+ u8 *response;
+ struct nvme_dev *dev = ns->dev;
+ dma_addr_t dma_addr;
+ void *mem;
+ struct nvme_id_ctrl *id_ctrl;
+ u32 ll_length, lun_id;
+ u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
+ __be32 tmp_len;
+
+ alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
+ select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
+
+ if ((select_report != ALL_LUNS_RETURNED) &&
+ (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
+ (select_report != RESTRICTED_LUNS_RETURNED)) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ } else {
+ /* NVMe Controller Identify */
+ mem = dma_alloc_coherent(&dev->pci_dev->dev,
+ sizeof(struct nvme_id_ctrl),
+ &dma_addr, GFP_KERNEL);
+ if (mem == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out_dma;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out_dma;
+ }
+ id_ctrl = mem;
+ ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
+ resp_size = ll_length + LUN_DATA_HEADER_SIZE;
+
+ if (alloc_len < resp_size) {
+ res = nvme_trans_completion(hdr,
+ SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out_dma;
+ }
+
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out_dma;
+ }
+ memset(response, 0, resp_size);
+
+ /* The first LUN ID will always be 0 per the SAM spec */
+ for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
+ /*
+ * Set the LUN Id and then increment to the next LUN
+ * location in the parameter data.
+ */
+ __be64 tmp_id = cpu_to_be64(lun_id);
+ memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
+ lun_id_offset += LUN_ENTRY_SIZE;
+ }
+ tmp_len = cpu_to_be32(ll_length);
+ memcpy(response, &tmp_len, sizeof(u32));
+ }
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out_dma:
+ dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+ dma_addr);
+ out:
+ return res;
+}
+
+static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 alloc_len, xfer_len, resp_size;
+ u8 desc_format;
+ u8 *response;
+
+ alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
+ desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
+ desc_format &= REQUEST_SENSE_DESC_MASK;
+
+ resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
+ (FIXED_FMT_SENSE_DATA_SIZE));
+ response = kmalloc(resp_size, GFP_KERNEL);
+ if (response == NULL) {
+ res = -ENOMEM;
+ goto out;
+ }
+ memset(response, 0, resp_size);
+
+ if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
+ /* Descriptor Format Sense Data */
+ response[0] = DESC_FORMAT_SENSE_DATA;
+ response[1] = NO_SENSE;
+ /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
+ response[2] = SCSI_ASC_NO_SENSE;
+ response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ /* SDAT_OVFL = 0 | Additional Sense Length = 0 */
+ } else {
+ /* Fixed Format Sense Data */
+ response[0] = FIXED_SENSE_DATA;
+ /* Byte 1 = Obsolete */
+ response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
+ /* Bytes 3-6 - Information - set to zero */
+ response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
+ /* Bytes 8-11 - Cmd Specific Information - set to zero */
+ response[12] = SCSI_ASC_NO_SENSE;
+ response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ /* Byte 14 = Field Replaceable Unit Code = 0 */
+ /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
+ }
+
+ xfer_len = min(alloc_len, resp_size);
+ res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ kfree(response);
+ out:
+ return res;
+}
+
+static int nvme_trans_security_protocol(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_queue *nvmeq;
+ struct nvme_command c;
+ u8 immed, pcmod, pc, no_flush, start;
+
+ immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
+ pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
+ pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
+ no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
+ start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
+
+ immed &= START_STOP_UNIT_CDB_IMMED_MASK;
+ pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
+ pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
+ no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
+ start &= START_STOP_UNIT_CDB_START_MASK;
+
+ if (immed != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ } else {
+ if (no_flush == 0) {
+ /* Issue NVME FLUSH command prior to START STOP UNIT */
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+
+ nvmeq = get_nvmeq(ns->dev);
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc) {
+ res = nvme_sc;
+ goto out;
+ }
+ }
+ /* Setup the expected power state transition */
+ res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
+ }
+
+ out:
+ return res;
+}
+
+static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr, u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ int nvme_sc;
+ struct nvme_command c;
+ struct nvme_queue *nvmeq;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(ns->ns_id);
+
+ nvmeq = get_nvmeq(ns->dev);
+ put_nvmeq(nvmeq);
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ res = nvme_trans_status_code(hdr, nvme_sc);
+ if (res)
+ goto out;
+ if (nvme_sc)
+ res = nvme_sc;
+
+ out:
+ return res;
+}
+
+static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u8 parm_hdr_len = 0;
+ u8 nvme_pf_code = 0;
+ u8 format_prot_info, long_list, format_data;
+
+ format_prot_info = GET_U8_FROM_CDB(cmd,
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
+ long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
+ format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
+
+ format_prot_info = (format_prot_info &
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
+ FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
+ long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
+ format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
+
+ if (format_data != 0) {
+ if (format_prot_info != 0) {
+ if (long_list == 0)
+ parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
+ else
+ parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
+ }
+ } else if (format_data == 0 && format_prot_info != 0) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+
+ /* Get parm header from data-in/out buffer */
+ /*
+ * According to the translation spec, the only fields in the parameter
+ * list we are concerned with are in the header. So allocate only that.
+ */
+ if (parm_hdr_len > 0) {
+ res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
+ format_prot_info, &nvme_pf_code);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ }
+
+ /* Attempt to activate any previously downloaded firmware image */
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
+
+ /* Determine Block size and count and send format command */
+ res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
+ res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
+
+ out:
+ return res;
+}
+
+static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
+ struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ struct nvme_dev *dev = ns->dev;
+
+ if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ NOT_READY, SCSI_ASC_LUN_NOT_READY,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ else
+ res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
+
+ return res;
+}
+
+static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ int res = SNTI_TRANSLATION_SUCCESS;
+ u32 buffer_offset, parm_list_length;
+ u8 buffer_id, mode;
+
+ parm_list_length =
+ GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
+ if (parm_list_length % BYTES_TO_DWORDS != 0) {
+ /* NVMe expects Firmware file to be a whole number of DWORDS */
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
+ if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ goto out;
+ }
+ mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
+ WRITE_BUFFER_CDB_MODE_MASK;
+ buffer_offset =
+ GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
+
+ switch (mode) {
+ case DOWNLOAD_SAVE_ACTIVATE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ case DOWNLOAD_SAVE_DEFER_ACTIVATE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ case ACTIVATE_DEFERRED_MICROCODE:
+ res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+ parm_list_length, buffer_offset,
+ buffer_id);
+ break;
+ default:
+ res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+
+ out:
+ return res;
+}
+
+struct scsi_unmap_blk_desc {
+ __be64 slba;
+ __be32 nlb;
+ u32 resv;
+};
+
+struct scsi_unmap_parm_list {
+ __be16 unmap_data_len;
+ __be16 unmap_blk_desc_data_len;
+ u32 resv;
+ struct scsi_unmap_blk_desc desc[0];
+};
+
+static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+ u8 *cmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct scsi_unmap_parm_list *plist;
+ struct nvme_dsm_range *range;
+ struct nvme_queue *nvmeq;
+ struct nvme_command c;
+ int i, nvme_sc, res = -ENOMEM;
+ u16 ndesc, list_len;
+ dma_addr_t dma_addr;
+
+ list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
+ if (!list_len)
+ return -EINVAL;
+
+ plist = kmalloc(list_len, GFP_KERNEL);
+ if (!plist)
+ return -ENOMEM;
+
+ res = nvme_trans_copy_from_user(hdr, plist, list_len);
+ if (res != SNTI_TRANSLATION_SUCCESS)
+ goto out;
+
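+	/* Each UNMAP block descriptor is 16 bytes; an NVMe DSM command
+	 * can carry at most 256 ranges. */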
+ ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
+ if (!ndesc || ndesc > 256) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ &dma_addr, GFP_KERNEL);
+	if (!range) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+ for (i = 0; i < ndesc; i++) {
+ range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
+ range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
+ range[i].cattr = 0;
+ }
+
+ memset(&c, 0, sizeof(c));
+ c.dsm.opcode = nvme_cmd_dsm;
+ c.dsm.nsid = cpu_to_le32(ns->ns_id);
+ c.dsm.prp1 = cpu_to_le64(dma_addr);
+ c.dsm.nr = cpu_to_le32(ndesc - 1);
+ c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ nvmeq = get_nvmeq(dev);
+ put_nvmeq(nvmeq);
+
+ nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+ res = nvme_trans_status_code(hdr, nvme_sc);
+
+ dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+ range, dma_addr);
+ out:
+ kfree(plist);
+ return res;
+}
+
+static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
+{
+ u8 cmd[BLK_MAX_CDB];
+ int retcode;
+ unsigned int opcode;
+
+ if (hdr->cmdp == NULL)
+ return -EMSGSIZE;
+ if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+
+ opcode = cmd[0];
+
+ switch (opcode) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ retcode = nvme_trans_io(ns, hdr, 0, cmd);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ retcode = nvme_trans_io(ns, hdr, 1, cmd);
+ break;
+ case INQUIRY:
+ retcode = nvme_trans_inquiry(ns, hdr, cmd);
+ break;
+ case LOG_SENSE:
+ retcode = nvme_trans_log_sense(ns, hdr, cmd);
+ break;
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ retcode = nvme_trans_mode_select(ns, hdr, cmd);
+ break;
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ retcode = nvme_trans_mode_sense(ns, hdr, cmd);
+ break;
+ case READ_CAPACITY:
+ retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+ break;
+ case SERVICE_ACTION_IN:
+ if (IS_READ_CAP_16(cmd))
+ retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+ else
+ goto out;
+ break;
+ case REPORT_LUNS:
+ retcode = nvme_trans_report_luns(ns, hdr, cmd);
+ break;
+ case REQUEST_SENSE:
+ retcode = nvme_trans_request_sense(ns, hdr, cmd);
+ break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ retcode = nvme_trans_security_protocol(ns, hdr, cmd);
+ break;
+ case START_STOP:
+ retcode = nvme_trans_start_stop(ns, hdr, cmd);
+ break;
+ case SYNCHRONIZE_CACHE:
+ retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
+ break;
+ case FORMAT_UNIT:
+ retcode = nvme_trans_format_unit(ns, hdr, cmd);
+ break;
+ case TEST_UNIT_READY:
+ retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
+ break;
+ case WRITE_BUFFER:
+ retcode = nvme_trans_write_buffer(ns, hdr, cmd);
+ break;
+ case UNMAP:
+ retcode = nvme_trans_unmap(ns, hdr, cmd);
+ break;
+ default:
+ out:
+ retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+ ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+ SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ break;
+ }
+ return retcode;
+}
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
+{
+ struct sg_io_hdr hdr;
+ int retcode;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
+ return -EFAULT;
+ if (hdr.interface_id != 'S')
+ return -EINVAL;
+ if (hdr.cmd_len > BLK_MAX_CDB)
+ return -EINVAL;
+
+ retcode = nvme_scsi_translate(ns, &hdr);
+ if (retcode < 0)
+ return retcode;
+ if (retcode > 0)
+ retcode = SNTI_TRANSLATION_SUCCESS;
+ if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
+ return -EFAULT;
+
+ return retcode;
+}
+
+int nvme_sg_get_version_num(int __user *ip)
+{
+ return put_user(sg_version_num, ip);
+}
depends on PCI && X86
select DMA_ENGINE
select DCA
- select ASYNC_TX_DISABLE_PQ_VAL_DMA
- select ASYNC_TX_DISABLE_XOR_VAL_DMA
help
Enable support for the Intel(R) I/OAT DMA engine present
in recent Intel Xeon chipsets.
This DMA controller transfers data from memory to peripheral fifo
or vice versa. It does not support memory to memory data transfer.
-
-
-config SH_DMAE
- tristate "Renesas SuperH DMAC support"
- depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
- depends on !SH_DMA_API
- select DMA_ENGINE
- help
- Enable support for the Renesas SuperH DMA controllers.
+source "drivers/dma/sh/Kconfig"
config COH901318
bool "ST-Ericsson COH901318 DMA support"
config DMA_VIRTUAL_CHANNELS
tristate
+config DMA_ACPI
+ def_bool y
+ depends on ACPI
+
config DMA_OF
def_bool y
depends on OF
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE) += sh/
+obj-$(CONFIG_SH_DMAE_BASE) += sh/
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
--- /dev/null
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of-dma.c
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+static LIST_HEAD(acpi_dma_list);
+static DEFINE_MUTEX(acpi_dma_lock);
+
+/**
+ * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
+ * @dev: struct device of DMA controller
+ * @acpi_dma_xlate: translation function which converts a dma specifier
+ * into a dma_chan structure
+ * @data: pointer to controller specific data to be used by
+ * translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with an appropriate acpi_dma_controller_free()
+ * call.
+ */
+int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ struct acpi_device *adev;
+ struct acpi_dma *adma;
+
+ if (!dev || !acpi_dma_xlate)
+ return -EINVAL;
+
+ /* Check if the device was enumerated by ACPI */
+ if (!ACPI_HANDLE(dev))
+ return -EINVAL;
+
+ if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+ return -EINVAL;
+
+ adma = kzalloc(sizeof(*adma), GFP_KERNEL);
+ if (!adma)
+ return -ENOMEM;
+
+ adma->dev = dev;
+ adma->acpi_dma_xlate = acpi_dma_xlate;
+ adma->data = data;
+
+ /* Now queue acpi_dma controller structure in list */
+ mutex_lock(&acpi_dma_lock);
+ list_add_tail(&adma->dma_controllers, &acpi_dma_list);
+ mutex_unlock(&acpi_dma_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
+
+/**
+ * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list
+ * @dev: struct device of DMA controller
+ *
+ * Memory allocated by acpi_dma_controller_register() is freed here.
+ */
+int acpi_dma_controller_free(struct device *dev)
+{
+ struct acpi_dma *adma;
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&acpi_dma_lock);
+
+ list_for_each_entry(adma, &acpi_dma_list, dma_controllers)
+ if (adma->dev == dev) {
+ list_del(&adma->dma_controllers);
+ mutex_unlock(&acpi_dma_lock);
+ kfree(adma);
+ return 0;
+ }
+
+ mutex_unlock(&acpi_dma_lock);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
+
+static void devm_acpi_dma_release(struct device *dev, void *res)
+{
+ acpi_dma_controller_free(dev);
+}
+
+/**
+ * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
+ * @dev: device that is registering this DMA controller
+ * @acpi_dma_xlate: translation function
+ * @data: pointer to controller specific data
+ *
+ * Managed acpi_dma_controller_register(). DMA controllers registered by this
+ * function are automatically freed on driver detach. See
+ * acpi_dma_controller_register() for more information.
+ */
+int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ void *res;
+ int ret;
+
+ res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
+ if (ret) {
+ devres_free(res);
+ return ret;
+ }
+ devres_add(dev, res);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
+
+/**
+ * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
+ * @dev:	device that is unregistering this DMA controller
+ *
+ * Unregister a DMA controller registered with
+ * devm_acpi_dma_controller_register(). Normally this function will not need to
+ * be called and the resource management code will ensure that the resource is
+ * freed.
+ */
+void devm_acpi_dma_controller_free(struct device *dev)
+{
+ WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+
+struct acpi_dma_parser_data {
+ struct acpi_dma_spec dma_spec;
+ size_t index;
+ size_t n;
+};
+
+/**
+ * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier
+ * @res: struct acpi_resource to get FixedDMA resources from
+ * @data: pointer to a helper struct acpi_dma_parser_data
+ */
+static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
+{
+ struct acpi_dma_parser_data *pdata = data;
+
+ if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
+ struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma;
+
+ if (pdata->n++ == pdata->index) {
+ pdata->dma_spec.chan_id = dma->channels;
+ pdata->dma_spec.slave_id = dma->request_lines;
+ }
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+/**
+ * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel
+ * @dev: struct device to get DMA request from
+ * @index: index of FixedDMA descriptor for @dev
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+ size_t index)
+{
+ struct acpi_dma_parser_data pdata;
+ struct acpi_dma_spec *dma_spec = &pdata.dma_spec;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ struct acpi_dma *adma;
+ struct dma_chan *chan = NULL;
+
+ /* Check if the device was enumerated by ACPI */
+ if (!dev || !ACPI_HANDLE(dev))
+ return NULL;
+
+ if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+ return NULL;
+
+ memset(&pdata, 0, sizeof(pdata));
+ pdata.index = index;
+
+ /* Initial values for the request line and channel */
+ dma_spec->chan_id = -1;
+ dma_spec->slave_id = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(adev, &resource_list,
+ acpi_dma_parse_fixed_dma, &pdata);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
+ return NULL;
+
+ mutex_lock(&acpi_dma_lock);
+
+ list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
+ dma_spec->dev = adma->dev;
+ chan = adma->acpi_dma_xlate(dma_spec, adma);
+ if (chan)
+ break;
+ }
+
+ mutex_unlock(&acpi_dma_lock);
+ return chan;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
+
+/**
+ * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel
+ * @dev: struct device to get DMA request from
+ * @name: slave channel name corresponding to a FixedDMA descriptor for @dev
+ *
+ * In order to support both Device Tree and ACPI in a single driver we
+ * translate the names "tx" and "rx" here based on the most common case where
+ * the first FixedDMA descriptor is TX and second is RX.
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+ const char *name)
+{
+ size_t index;
+
+ if (!strcmp(name, "tx"))
+ index = 0;
+ else if (!strcmp(name, "rx"))
+ index = 1;
+ else
+ return NULL;
+
+ return acpi_dma_request_slave_chan_by_index(dev, index);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
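Slave drivers normally do not need to call these helpers directly: once the dmaengine core change later in this pull is in place, dma_request_slave_channel() dispatches here for ACPI-enumerated devices. A minimal sketch of the consumer side, with the fallback left as a comment:

	struct dma_chan *txchan, *rxchan;

	txchan = dma_request_slave_channel(dev, "tx");	/* first FixedDMA entry */
	rxchan = dma_request_slave_channel(dev, "rx");	/* second FixedDMA entry */
	if (!txchan || !rxchan) {
		/* fall back to PIO or fail the probe */
	}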
+
+/**
+ * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper
+ * @dma_spec: pointer to ACPI DMA specifier
+ * @adma: pointer to ACPI DMA controller data
+ *
+ * A simple translation function for ACPI-based devices. Passes the &struct
+ * acpi_dma_spec to the filter function provided by the DMA controller driver.
+ * Returns a pointer to the channel if found or %NULL otherwise.
+ */
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma)
+{
+ struct acpi_dma_filter_info *info = adma->data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
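Note that acpi_dma_simple_xlate() expects @adma->data to point to a struct acpi_dma_filter_info carrying the capability mask and a dma_filter_fn, so a controller that only needs capability-plus-filter matching can pass this helper straight to the registration call; the dw_dmac changes later in this pull (dw_dma_acpi_controller_register()) do exactly that.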
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include "at_hdmac_regs.h"
#include "dmaengine.h"
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
| ATC_FC_MEM2PER
- | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
+ | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
reg = sconfig->dst_addr;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
| ATC_FC_PER2MEM
- | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
+ | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
reg = sconfig->src_addr;
for_each_sg(sgl, sg, sg_len, i) {
desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
| ATC_FC_MEM2PER
- | ATC_SIF(AT_DMA_MEM_IF)
- | ATC_DIF(AT_DMA_PER_IF);
+ | ATC_SIF(atchan->mem_if)
+ | ATC_DIF(atchan->per_if);
break;
case DMA_DEV_TO_MEM:
desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
| ATC_FC_PER2MEM
- | ATC_SIF(AT_DMA_PER_IF)
- | ATC_DIF(AT_DMA_MEM_IF);
+ | ATC_SIF(atchan->per_if)
+ | ATC_DIF(atchan->mem_if);
break;
default:
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
+#ifdef CONFIG_OF
+static bool at_dma_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *atslave = slave;
+
+ if (atslave->dma_dev == chan->device->dev) {
+ chan->private = atslave;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of_dma)
+{
+ struct dma_chan *chan;
+ struct at_dma_chan *atchan;
+ struct at_dma_slave *atslave;
+ dma_cap_mask_t mask;
+ unsigned int per_id;
+ struct platform_device *dmac_pdev;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ dmac_pdev = of_find_device_by_node(dma_spec->np);
+ if (!dmac_pdev)
+ return NULL;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+ if (!atslave)
+ return NULL;
+ /*
+ * We can fill both SRC_PER and DST_PER; one of these fields will be
+ * ignored depending on the DMA transfer direction.
+ */
+ per_id = dma_spec->args[1];
+ atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
+ | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
+ | ATC_SRC_PER(per_id);
+ atslave->dma_dev = &dmac_pdev->dev;
+
+ chan = dma_request_channel(mask, at_dma_filter, atslave);
+ if (!chan)
+ return NULL;
+
+ atchan = to_at_dma_chan(chan);
+ atchan->per_if = dma_spec->args[0] & 0xff;
+ atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
+
+ return chan;
+}
+#else
+static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *of_dma)
+{
+ return NULL;
+}
+#endif
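As written, at_dma_xlate() interprets a consumer's two-cell specifier as follows: the second cell is the peripheral ID programmed into both ATC_SRC_PER and ATC_DST_PER, while the first cell packs the memory interface into bits 23..16 and the peripheral interface into bits 7..0, which end up in atchan->mem_if and atchan->per_if respectively.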
/*-- Module Management -----------------------------------------------*/
for (i = 0; i < plat_dat->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
+ atchan->mem_if = AT_DMA_MEM_IF;
+ atchan->per_if = AT_DMA_PER_IF;
atchan->chan_common.device = &atdma->dma_common;
dma_cookie_init(&atchan->chan_common);
list_add_tail(&atchan->chan_common.device_node,
dma_async_device_register(&atdma->dma_common);
+ /*
+ * Do not return an error if the dmac node is not present, so as not to
+ * break the existing way of requesting a channel with
+ * dma_request_channel().
+ */
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ at_dma_xlate, atdma);
+ if (err) {
+ dev_err(&pdev->dev, "could not register of_dma_controller\n");
+ goto err_of_dma_controller_register;
+ }
+ }
+
return 0;
+err_of_dma_controller_register:
+ dma_async_device_unregister(&atdma->dma_common);
+ dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
platform_set_drvdata(pdev, NULL);
free_irq(platform_get_irq(pdev, 0), atdma);
return err;
}
-static int __exit at_dma_remove(struct platform_device *pdev)
+static int at_dma_remove(struct platform_device *pdev)
{
struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
};
static struct platform_driver at_dma_driver = {
- .remove = __exit_p(at_dma_remove),
+ .remove = at_dma_remove,
.shutdown = at_dma_shutdown,
.id_table = atdma_devtypes,
.driver = {
* @device: parent device
* @ch_regs: memory mapped register base
* @mask: channel index in a mask
+ * @per_if: peripheral interface
+ * @mem_if: memory interface
* @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
struct at_dma *device;
void __iomem *ch_regs;
u8 mask;
+ u8 per_if;
+ u8 mem_if;
unsigned long status;
struct tasklet_struct tasklet;
u32 save_cfg;
return err;
}
-static int __exit coh901318_remove(struct platform_device *pdev)
+static int coh901318_remove(struct platform_device *pdev)
{
struct coh901318_base *base = platform_get_drvdata(pdev);
static struct platform_driver coh901318_driver = {
- .remove = __exit_p(coh901318_remove),
+ .remove = coh901318_remove,
.driver = {
.name = "coh901318",
},
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
#define dma_device_satisfies_mask(device, mask) \
__dma_device_satisfies_mask((device), &(mask))
static int
-__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device,
+ const dma_cap_mask_t *want)
{
dma_cap_mask_t has;
}
}
-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
+ struct dma_device *dev,
dma_filter_fn fn, void *fn_param)
{
struct dma_chan *chan;
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
*/
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
* @dev: pointer to client device structure
* @name: slave channel name
*/
-struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
/* If device-tree is present get slave info from here */
if (dev->of_node)
return of_dma_request_slave_channel(dev->of_node, name);
+ /* If device was enumerated by ACPI get slave info from here */
+ if (ACPI_HANDLE(dev))
+ return acpi_dma_request_slave_chan_by_name(dev, name);
+
return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
* DMA Engine test module
*
* Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
+/* Maximum number of mismatched bytes in the buffer to print */
+#define MAX_ERROR_COUNT 32
+
/*
* Initialization patterns. All bytes in the source buffer has bit 7
* set, all bytes in the destination buffer has bit 7 cleared.
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
+enum dmatest_error_type {
+ DMATEST_ET_OK,
+ DMATEST_ET_MAP_SRC,
+ DMATEST_ET_MAP_DST,
+ DMATEST_ET_PREP,
+ DMATEST_ET_SUBMIT,
+ DMATEST_ET_TIMEOUT,
+ DMATEST_ET_DMA_ERROR,
+ DMATEST_ET_DMA_IN_PROGRESS,
+ DMATEST_ET_VERIFY,
+ DMATEST_ET_VERIFY_BUF,
+};
+
+struct dmatest_verify_buffer {
+ unsigned int index;
+ u8 expected;
+ u8 actual;
+};
+
+struct dmatest_verify_result {
+ unsigned int error_count;
+ struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
+ u8 pattern;
+ bool is_srcbuf;
+};
+
+struct dmatest_thread_result {
+ struct list_head node;
+ unsigned int n;
+ unsigned int src_off;
+ unsigned int dst_off;
+ unsigned int len;
+ enum dmatest_error_type type;
+ union {
+ unsigned long data;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ int error;
+ struct dmatest_verify_result *vr;
+ };
+};
+
+struct dmatest_result {
+ struct list_head node;
+ char *name;
+ struct list_head results;
+};
+
+struct dmatest_info;
+
struct dmatest_thread {
struct list_head node;
+ struct dmatest_info *info;
struct task_struct *task;
struct dma_chan *chan;
u8 **srcs;
u8 **dsts;
enum dma_transaction_type type;
+ bool done;
};
struct dmatest_chan {
struct list_head threads;
};
-/*
- * These are protected by dma_list_mutex since they're only used by
- * the DMA filter function callback
+/**
+ * struct dmatest_params - test parameters.
+ * @buf_size: size of the memcpy test buffer
+ * @channel: bus ID of the channel to test
+ * @device: bus ID of the DMA Engine to test
+ * @threads_per_chan: number of threads to start per channel
+ * @max_channels: maximum number of channels to use
+ * @iterations: iterations before stopping test
+ * @xor_sources: number of xor source buffers
+ * @pq_sources: number of p+q source buffers
+ * @timeout: transfer timeout in msec, -1 for infinite timeout
*/
-static LIST_HEAD(dmatest_channels);
-static unsigned int nr_channels;
+struct dmatest_params {
+ unsigned int buf_size;
+ char channel[20];
+ char device[20];
+ unsigned int threads_per_chan;
+ unsigned int max_channels;
+ unsigned int iterations;
+ unsigned int xor_sources;
+ unsigned int pq_sources;
+ int timeout;
+};
-static bool dmatest_match_channel(struct dma_chan *chan)
+/**
+ * struct dmatest_info - test information.
+ * @params: test parameters
+ * @lock: protects the internal state of this structure
+ * @channels: channels under test
+ * @nr_channels: number of channels under test
+ * @root: debugfs root directory of the module
+ * @dbgfs_params: test parameters to be applied on the next run
+ * @results: list of accumulated per-thread results
+ * @results_lock: protects @results
+ */
+struct dmatest_info {
+ /* Test parameters */
+ struct dmatest_params params;
+
+ /* Internal state */
+ struct list_head channels;
+ unsigned int nr_channels;
+ struct mutex lock;
+
+ /* debugfs related stuff */
+ struct dentry *root;
+ struct dmatest_params dbgfs_params;
+
+ /* Test results */
+ struct list_head results;
+ struct mutex results_lock;
+};
+
+static struct dmatest_info test_info;
+
+static bool dmatest_match_channel(struct dmatest_params *params,
+ struct dma_chan *chan)
{
- if (test_channel[0] == '\0')
+ if (params->channel[0] == '\0')
return true;
- return strcmp(dma_chan_name(chan), test_channel) == 0;
+ return strcmp(dma_chan_name(chan), params->channel) == 0;
}
-static bool dmatest_match_device(struct dma_device *device)
+static bool dmatest_match_device(struct dmatest_params *params,
+ struct dma_device *device)
{
- if (test_device[0] == '\0')
+ if (params->device[0] == '\0')
return true;
- return strcmp(dev_name(device->dev), test_device) == 0;
+ return strcmp(dev_name(device->dev), params->device) == 0;
}
static unsigned long dmatest_random(void)
return buf;
}
-static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
+ unsigned int buf_size)
{
unsigned int i;
u8 *buf;
for ( ; i < start + len; i++)
buf[i] = PATTERN_SRC | PATTERN_COPY
| (~i & PATTERN_COUNT_MASK);
- for ( ; i < test_buf_size; i++)
+ for ( ; i < buf_size; i++)
buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
buf++;
}
}
-static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
+ unsigned int buf_size)
{
unsigned int i;
u8 *buf;
for ( ; i < start + len; i++)
buf[i] = PATTERN_DST | PATTERN_OVERWRITE
| (~i & PATTERN_COUNT_MASK);
- for ( ; i < test_buf_size; i++)
+ for ( ; i < buf_size; i++)
buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
}
}
-static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
- unsigned int counter, bool is_srcbuf)
-{
- u8 diff = actual ^ pattern;
- u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
- const char *thread_name = current->comm;
-
- if (is_srcbuf)
- pr_warning("%s: srcbuf[0x%x] overwritten!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
- else if ((pattern & PATTERN_COPY)
- && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
- pr_warning("%s: dstbuf[0x%x] not copied!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
- else if (diff & PATTERN_SRC)
- pr_warning("%s: dstbuf[0x%x] was copied!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
- else
- pr_warning("%s: dstbuf[0x%x] mismatch!"
- " Expected %02x, got %02x\n",
- thread_name, index, expected, actual);
-}
-
-static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
- unsigned int end, unsigned int counter, u8 pattern,
- bool is_srcbuf)
+static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
+ unsigned int start, unsigned int end, unsigned int counter,
+ u8 pattern, bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
u8 expected;
u8 *buf;
unsigned int counter_orig = counter;
+ struct dmatest_verify_buffer *vb;
for (; (buf = *bufs); bufs++) {
counter = counter_orig;
actual = buf[i];
expected = pattern | (~counter & PATTERN_COUNT_MASK);
if (actual != expected) {
- if (error_count < 32)
- dmatest_mismatch(actual, pattern, i,
- counter, is_srcbuf);
+ if (error_count < MAX_ERROR_COUNT && vr) {
+ vb = &vr->data[error_count];
+ vb->index = i;
+ vb->expected = expected;
+ vb->actual = actual;
+ }
error_count++;
}
counter++;
}
}
- if (error_count > 32)
+ if (error_count > MAX_ERROR_COUNT)
pr_warning("%s: %u errors suppressed\n",
- current->comm, error_count - 32);
+ current->comm, error_count - MAX_ERROR_COUNT);
return error_count;
}
return val % 2 ? val : val - 1;
}
+static char *verify_result_get_one(struct dmatest_verify_result *vr,
+ unsigned int i)
+{
+ struct dmatest_verify_buffer *vb = &vr->data[i];
+ u8 diff = vb->actual ^ vr->pattern;
+ static char buf[512];
+ char *msg;
+
+ if (vr->is_srcbuf)
+ msg = "srcbuf overwritten!";
+ else if ((vr->pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ msg = "dstbuf not copied!";
+ else if (diff & PATTERN_SRC)
+ msg = "dstbuf was copied!";
+ else
+ msg = "dstbuf mismatch!";
+
+ snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
+ vb->index, vb->expected, vb->actual);
+
+ return buf;
+}
+
+static char *thread_result_get(const char *name,
+ struct dmatest_thread_result *tr)
+{
+ static const char * const messages[] = {
+ [DMATEST_ET_OK] = "No errors",
+ [DMATEST_ET_MAP_SRC] = "src mapping error",
+ [DMATEST_ET_MAP_DST] = "dst mapping error",
+ [DMATEST_ET_PREP] = "prep error",
+ [DMATEST_ET_SUBMIT] = "submit error",
+ [DMATEST_ET_TIMEOUT] = "test timed out",
+ [DMATEST_ET_DMA_ERROR] =
+ "got completion callback (DMA_ERROR)",
+ [DMATEST_ET_DMA_IN_PROGRESS] =
+ "got completion callback (DMA_IN_PROGRESS)",
+ [DMATEST_ET_VERIFY] = "errors",
+ [DMATEST_ET_VERIFY_BUF] = "verify errors",
+ };
+ static char buf[512];
+
+ snprintf(buf, sizeof(buf) - 1,
+ "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
+ name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
+ tr->len, tr->data);
+
+ return buf;
+}
+
+static int thread_result_add(struct dmatest_info *info,
+ struct dmatest_result *r, enum dmatest_error_type type,
+ unsigned int n, unsigned int src_off, unsigned int dst_off,
+ unsigned int len, unsigned long data)
+{
+ struct dmatest_thread_result *tr;
+
+ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+ if (!tr)
+ return -ENOMEM;
+
+ tr->type = type;
+ tr->n = n;
+ tr->src_off = src_off;
+ tr->dst_off = dst_off;
+ tr->len = len;
+ tr->data = data;
+
+ mutex_lock(&info->results_lock);
+ list_add_tail(&tr->node, &r->results);
+ mutex_unlock(&info->results_lock);
+
+ pr_warn("%s\n", thread_result_get(r->name, tr));
+ return 0;
+}
+
+static unsigned int verify_result_add(struct dmatest_info *info,
+ struct dmatest_result *r, unsigned int n,
+ unsigned int src_off, unsigned int dst_off, unsigned int len,
+ u8 **bufs, int whence, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ struct dmatest_verify_result *vr;
+ unsigned int error_count;
+ unsigned int buf_off = is_srcbuf ? src_off : dst_off;
+ unsigned int start, end;
+
+ if (whence < 0) {
+ start = 0;
+ end = buf_off;
+ } else if (whence > 0) {
+ start = buf_off + len;
+ end = info->params.buf_size;
+ } else {
+ start = buf_off;
+ end = buf_off + len;
+ }
+
+ vr = kmalloc(sizeof(*vr), GFP_KERNEL);
+ if (!vr) {
+ pr_warn("dmatest: No memory to store verify result\n");
+ return dmatest_verify(NULL, bufs, start, end, counter, pattern,
+ is_srcbuf);
+ }
+
+ vr->pattern = pattern;
+ vr->is_srcbuf = is_srcbuf;
+
+ error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
+ is_srcbuf);
+ if (error_count) {
+ vr->error_count = error_count;
+ thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
+ dst_off, len, (unsigned long)vr);
+ return error_count;
+ }
+
+ kfree(vr);
+ return 0;
+}
+
+static void result_free(struct dmatest_info *info, const char *name)
+{
+ struct dmatest_result *r, *_r;
+
+ mutex_lock(&info->results_lock);
+ list_for_each_entry_safe(r, _r, &info->results, node) {
+ struct dmatest_thread_result *tr, *_tr;
+
+ if (name && strcmp(r->name, name))
+ continue;
+
+ list_for_each_entry_safe(tr, _tr, &r->results, node) {
+ if (tr->type == DMATEST_ET_VERIFY_BUF)
+ kfree(tr->vr);
+ list_del(&tr->node);
+ kfree(tr);
+ }
+
+ kfree(r->name);
+ list_del(&r->node);
+ kfree(r);
+ }
+
+ mutex_unlock(&info->results_lock);
+}
+
+static struct dmatest_result *result_init(struct dmatest_info *info,
+ const char *name)
+{
+ struct dmatest_result *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (r) {
+ r->name = kstrdup(name, GFP_KERNEL);
+ INIT_LIST_HEAD(&r->results);
+ mutex_lock(&info->results_lock);
+ list_add_tail(&r->node, &info->results);
+ mutex_unlock(&info->results_lock);
+ }
+ return r;
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
* offsets for a given operation type until it is told to exit by
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
struct dmatest_done done = { .wait = &done_wait };
+ struct dmatest_info *info;
+ struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
const char *thread_name;
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
- u8 pq_coefs[pq_sources + 1];
+ u8 *pq_coefs = NULL;
int ret;
int src_cnt;
int dst_cnt;
int i;
+ struct dmatest_result *result;
thread_name = current->comm;
set_freezable();
ret = -ENOMEM;
smp_rmb();
+ info = thread->info;
+ params = &info->params;
chan = thread->chan;
dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(xor_sources | 1, dev->max_xor);
+ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
dst_cnt = 1;
} else if (thread->type == DMA_PQ) {
/* force odd to ensure dst = src */
- src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
+ src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
dst_cnt = 2;
+
+ pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+ if (!pq_coefs)
+ goto err_thread_type;
+
for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
} else
+ goto err_thread_type;
+
+ result = result_init(info, thread_name);
+ if (!result)
goto err_srcs;
thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
if (!thread->srcs)
goto err_srcs;
for (i = 0; i < src_cnt; i++) {
- thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
if (!thread->srcs[i])
goto err_srcbuf;
}
if (!thread->dsts)
goto err_dsts;
for (i = 0; i < dst_cnt; i++) {
- thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
if (!thread->dsts[i])
goto err_dstbuf;
}
| DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
while (!kthread_should_stop()
- && !(iterations && total_tests >= iterations)) {
+ && !(params->iterations && total_tests >= params->iterations)) {
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
else if (thread->type == DMA_PQ)
align = dev->pq_align;
- if (1 << align > test_buf_size) {
+ if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
- test_buf_size, 1 << align);
+ params->buf_size, 1 << align);
break;
}
- len = dmatest_random() % test_buf_size + 1;
+ len = dmatest_random() % params->buf_size + 1;
len = (len >> align) << align;
if (!len)
len = 1 << align;
- src_off = dmatest_random() % (test_buf_size - len + 1);
- dst_off = dmatest_random() % (test_buf_size - len + 1);
+ src_off = dmatest_random() % (params->buf_size - len + 1);
+ dst_off = dmatest_random() % (params->buf_size - len + 1);
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
- dmatest_init_srcs(thread->srcs, src_off, len);
- dmatest_init_dsts(thread->dsts, dst_off, len);
+ dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
+ dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
for (i = 0; i < src_cnt; i++) {
u8 *buf = thread->srcs[i] + src_off;
ret = dma_mapping_error(dev->dev, dma_srcs[i]);
if (ret) {
unmap_src(dev->dev, dma_srcs, len, i);
- pr_warn("%s: #%u: mapping error %d with "
- "src_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, ret,
- src_off, len);
+ thread_result_add(info, result,
+ DMATEST_ET_MAP_SRC,
+ total_tests, src_off, dst_off,
+ len, ret);
failed_tests++;
continue;
}
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
- test_buf_size,
+ params->buf_size,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dma_dsts[i]);
if (ret) {
unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
- pr_warn("%s: #%u: mapping error %d with "
- "dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, ret,
- dst_off, test_buf_size);
+ unmap_dst(dev->dev, dma_dsts, params->buf_size,
+ i);
+ thread_result_add(info, result,
+ DMATEST_ET_MAP_DST,
+ total_tests, src_off, dst_off,
+ len, ret);
failed_tests++;
continue;
}
if (!tx) {
unmap_src(dev->dev, dma_srcs, len, src_cnt);
- unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
- pr_warning("%s: #%u: prep error with src_off=0x%x "
- "dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1,
- src_off, dst_off, len);
+ unmap_dst(dev->dev, dma_dsts, params->buf_size,
+ dst_cnt);
+ thread_result_add(info, result, DMATEST_ET_PREP,
+ total_tests, src_off, dst_off,
+ len, 0);
msleep(100);
failed_tests++;
continue;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- pr_warning("%s: #%u: submit error %d with src_off=0x%x "
- "dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, cookie,
- src_off, dst_off, len);
+ thread_result_add(info, result, DMATEST_ET_SUBMIT,
+ total_tests, src_off, dst_off,
+ len, cookie);
msleep(100);
failed_tests++;
continue;
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait, done.done,
- msecs_to_jiffies(timeout));
+ wait_event_freezable_timeout(done_wait,
+ done.done || kthread_should_stop(),
+ msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
* free it this time?" dancing. For now, just
* leave it dangling.
*/
- pr_warning("%s: #%u: test timed out\n",
- thread_name, total_tests - 1);
+ thread_result_add(info, result, DMATEST_ET_TIMEOUT,
+ total_tests, src_off, dst_off,
+ len, 0);
failed_tests++;
continue;
} else if (status != DMA_SUCCESS) {
- pr_warning("%s: #%u: got completion callback,"
- " but status is \'%s\'\n",
- thread_name, total_tests - 1,
- status == DMA_ERROR ? "error" : "in progress");
+ enum dmatest_error_type type = (status == DMA_ERROR) ?
+ DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
+ thread_result_add(info, result, type,
+ total_tests, src_off, dst_off,
+ len, status);
failed_tests++;
continue;
}
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
+ unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
error_count = 0;
pr_debug("%s: verifying source buffer...\n", thread_name);
- error_count += dmatest_verify(thread->srcs, 0, src_off,
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->srcs, -1,
0, PATTERN_SRC, true);
- error_count += dmatest_verify(thread->srcs, src_off,
- src_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, true);
- error_count += dmatest_verify(thread->srcs, src_off + len,
- test_buf_size, src_off + len,
- PATTERN_SRC, true);
-
- pr_debug("%s: verifying dest buffer...\n",
- thread->task->comm);
- error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->srcs, 0,
+ src_off, PATTERN_SRC | PATTERN_COPY, true);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->srcs, 1,
+ src_off + len, PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n", thread_name);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->dsts, -1,
0, PATTERN_DST, false);
- error_count += dmatest_verify(thread->dsts, dst_off,
- dst_off + len, src_off,
- PATTERN_SRC | PATTERN_COPY, false);
- error_count += dmatest_verify(thread->dsts, dst_off + len,
- test_buf_size, dst_off + len,
- PATTERN_DST, false);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->dsts, 0,
+ src_off, PATTERN_SRC | PATTERN_COPY, false);
+ error_count += verify_result_add(info, result, total_tests,
+ src_off, dst_off, len, thread->dsts, 1,
+ dst_off + len, PATTERN_DST, false);
if (error_count) {
- pr_warning("%s: #%u: %u errors with "
- "src_off=0x%x dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1, error_count,
- src_off, dst_off, len);
+ thread_result_add(info, result, DMATEST_ET_VERIFY,
+ total_tests, src_off, dst_off,
+ len, error_count);
failed_tests++;
} else {
- pr_debug("%s: #%u: No errors with "
- "src_off=0x%x dst_off=0x%x len=0x%x\n",
- thread_name, total_tests - 1,
- src_off, dst_off, len);
+ thread_result_add(info, result, DMATEST_ET_OK,
+ total_tests, src_off, dst_off,
+ len, 0);
}
}
err_srcbuf:
kfree(thread->srcs);
err_srcs:
+ kfree(pq_coefs);
+err_thread_type:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
if (ret)
dmaengine_terminate_all(chan);
- if (iterations > 0)
+ thread->done = true;
+
+ if (params->iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
interruptible_sleep_on(&wait_dmatest_exit);
kfree(dtc);
}
-static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
+static int dmatest_add_threads(struct dmatest_info *info,
+ struct dmatest_chan *dtc, enum dma_transaction_type type)
{
+ struct dmatest_params *params = &info->params;
struct dmatest_thread *thread;
struct dma_chan *chan = dtc->chan;
char *op;
else
return -EINVAL;
- for (i = 0; i < threads_per_chan; i++) {
+ for (i = 0; i < params->threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
pr_warning("dmatest: No memory for %s-%s%u\n",
break;
}
+ thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
smp_wmb();
return i;
}
-static int dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dmatest_info *info,
+ struct dma_chan *chan)
{
struct dmatest_chan *dtc;
struct dma_device *dma_dev = chan->device;
INIT_LIST_HEAD(&dtc->threads);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
+ cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
thread_count += cnt > 0 ? cnt : 0;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_XOR);
+ cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
}
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(dtc, DMA_PQ);
+ cnt = dmatest_add_threads(info, dtc, DMA_PQ);
thread_count += cnt > 0 ? cnt : 0;
}
pr_info("dmatest: Started %u threads using %s\n",
thread_count, dma_chan_name(chan));
- list_add_tail(&dtc->node, &dmatest_channels);
- nr_channels++;
+ list_add_tail(&dtc->node, &info->channels);
+ info->nr_channels++;
return 0;
}
static bool filter(struct dma_chan *chan, void *param)
{
- if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+ struct dmatest_params *params = param;
+
+ if (!dmatest_match_channel(params, chan) ||
+ !dmatest_match_device(params, chan->device))
return false;
else
return true;
}
-static int __init dmatest_init(void)
+static int __run_threaded_test(struct dmatest_info *info)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
+ struct dmatest_params *params = &info->params;
int err = 0;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
for (;;) {
- chan = dma_request_channel(mask, filter, NULL);
+ chan = dma_request_channel(mask, filter, params);
if (chan) {
- err = dmatest_add_channel(chan);
+ err = dmatest_add_channel(info, chan);
if (err) {
dma_release_channel(chan);
break; /* add_channel failed, punt */
}
} else
break; /* no more channels available */
- if (max_channels && nr_channels >= max_channels)
+ if (params->max_channels &&
+ info->nr_channels >= params->max_channels)
break; /* we have all we need */
}
-
return err;
}
-/* when compiled-in wait for drivers to load first */
-late_initcall(dmatest_init);
-static void __exit dmatest_exit(void)
+#ifndef MODULE
+static int run_threaded_test(struct dmatest_info *info)
+{
+ int ret;
+
+ mutex_lock(&info->lock);
+ ret = __run_threaded_test(info);
+ mutex_unlock(&info->lock);
+ return ret;
+}
+#endif
+
+static void __stop_threaded_test(struct dmatest_info *info)
{
struct dmatest_chan *dtc, *_dtc;
struct dma_chan *chan;
- list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+ list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
list_del(&dtc->node);
chan = dtc->chan;
dmatest_cleanup_channel(dtc);
- pr_debug("dmatest: dropped channel %s\n",
- dma_chan_name(chan));
+ pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
dma_release_channel(chan);
}
+
+ info->nr_channels = 0;
+}
+
+static void stop_threaded_test(struct dmatest_info *info)
+{
+ mutex_lock(&info->lock);
+ __stop_threaded_test(info);
+ mutex_unlock(&info->lock);
+}
+
+static int __restart_threaded_test(struct dmatest_info *info, bool run)
+{
+ struct dmatest_params *params = &info->params;
+ int ret;
+
+ /* Stop any running test first */
+ __stop_threaded_test(info);
+
+ if (run == false)
+ return 0;
+
+ /* Clear results from previous run */
+ result_free(info, NULL);
+
+ /* Copy test parameters */
+ memcpy(params, &info->dbgfs_params, sizeof(*params));
+
+ /* Run test with new parameters */
+ ret = __run_threaded_test(info);
+ if (ret) {
+ __stop_threaded_test(info);
+ pr_err("dmatest: Can't run test\n");
+ }
+
+ return ret;
+}
+
+static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ char tmp[20];
+ ssize_t len;
+
+ len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
+ if (len >= 0) {
+ tmp[len] = '\0';
+ strlcpy(to, strim(tmp), available);
+ }
+
+ return len;
+}
+
+static ssize_t dtf_read_channel(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos,
+ info->dbgfs_params.channel,
+ strlen(info->dbgfs_params.channel));
+}
+
+static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return dtf_write_string(info->dbgfs_params.channel,
+ sizeof(info->dbgfs_params.channel),
+ ppos, buf, size);
+}
+
+static const struct file_operations dtf_channel_fops = {
+ .read = dtf_read_channel,
+ .write = dtf_write_channel,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t dtf_read_device(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos,
+ info->dbgfs_params.device,
+ strlen(info->dbgfs_params.device));
+}
+
+static ssize_t dtf_write_device(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ return dtf_write_string(info->dbgfs_params.device,
+ sizeof(info->dbgfs_params.device),
+ ppos, buf, size);
+}
+
+static const struct file_operations dtf_device_fops = {
+ .read = dtf_read_device,
+ .write = dtf_write_device,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ char buf[3];
+ struct dmatest_chan *dtc;
+ bool alive = false;
+
+ mutex_lock(&info->lock);
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done) {
+ alive = true;
+ break;
+ }
+ }
+ }
+
+ if (alive) {
+ buf[0] = 'Y';
+ } else {
+ __stop_threaded_test(info);
+ buf[0] = 'N';
+ }
+
+ mutex_unlock(&info->lock);
+ buf[1] = '\n';
+ buf[2] = 0x00;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dmatest_info *info = file->private_data;
+ char buf[16];
+ bool bv;
+ int ret = 0;
+
+ if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
+ return -EFAULT;
+
+ if (strtobool(buf, &bv) == 0) {
+ mutex_lock(&info->lock);
+ ret = __restart_threaded_test(info, bv);
+ mutex_unlock(&info->lock);
+ }
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations dtf_run_fops = {
+ .read = dtf_read_run,
+ .write = dtf_write_run,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int dtf_results_show(struct seq_file *sf, void *data)
+{
+ struct dmatest_info *info = sf->private;
+ struct dmatest_result *result;
+ struct dmatest_thread_result *tr;
+ unsigned int i;
+
+ mutex_lock(&info->results_lock);
+ list_for_each_entry(result, &info->results, node) {
+ list_for_each_entry(tr, &result->results, node) {
+ seq_printf(sf, "%s\n",
+ thread_result_get(result->name, tr));
+ if (tr->type == DMATEST_ET_VERIFY_BUF) {
+ for (i = 0; i < tr->vr->error_count; i++) {
+ seq_printf(sf, "\t%s\n",
+ verify_result_get_one(tr->vr, i));
+ }
+ }
+ }
+ }
+
+ mutex_unlock(&info->results_lock);
+ return 0;
+}
+
+static int dtf_results_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dtf_results_show, inode->i_private);
+}
+
+static const struct file_operations dtf_results_fops = {
+ .open = dtf_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dmatest_register_dbgfs(struct dmatest_info *info)
+{
+ struct dentry *d;
+ struct dmatest_params *params = &info->dbgfs_params;
+ int ret = -ENOMEM;
+
+ d = debugfs_create_dir("dmatest", NULL);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+ if (!d)
+ goto err_root;
+
+ info->root = d;
+
+ /* Copy initial values */
+ memcpy(params, &info->params, sizeof(*params));
+
+ /* Test parameters */
+
+ d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->buf_size);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
+ info, &dtf_channel_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
+ info, &dtf_device_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->threads_per_chan);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->max_channels);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->iterations);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->xor_sources);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->pq_sources);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
+ (u32 *)&params->timeout);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ /* Run or stop threaded test */
+ d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
+ info, &dtf_run_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ /* Results of test in progress */
+ d = debugfs_create_file("results", S_IRUGO, info->root, info,
+ &dtf_results_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_node;
+
+ return 0;
+
+err_node:
+ debugfs_remove_recursive(info->root);
+err_root:
+ pr_err("dmatest: Failed to initialize debugfs\n");
+ return ret;
+}
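Taken together, these nodes give dmatest a runtime control surface: the parameter files edit the dbgfs_params copy, and writing a true value (for example "1" or "Y") to the run node stops any test in flight, drops old results, copies dbgfs_params into the live parameters and starts new threads. Reading run back reports Y as long as any thread is still working and N (reaping the channels) once none is, while the results node dumps the accumulated per-thread outcomes through dtf_results_show() above.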
+
+static int __init dmatest_init(void)
+{
+ struct dmatest_info *info = &test_info;
+ struct dmatest_params *params = &info->params;
+ int ret;
+
+ memset(info, 0, sizeof(*info));
+
+ mutex_init(&info->lock);
+ INIT_LIST_HEAD(&info->channels);
+
+ mutex_init(&info->results_lock);
+ INIT_LIST_HEAD(&info->results);
+
+ /* Set default parameters */
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, test_channel, sizeof(params->channel));
+ strlcpy(params->device, test_device, sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
+
+ ret = dmatest_register_dbgfs(info);
+ if (ret)
+ return ret;
+
+#ifdef MODULE
+ return 0;
+#else
+ return run_threaded_test(info);
+#endif
+}
+/* when compiled-in wait for drivers to load first */
+late_initcall(dmatest_init);
+
+static void __exit dmatest_exit(void)
+{
+ struct dmatest_info *info = &test_info;
+
+ debugfs_remove_recursive(info->root);
+ stop_threaded_test(info);
+ result_free(info, NULL);
}
module_exit(dmatest_exit);
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
#include "dw_dmac_regs.h"
#include "dmaengine.h"
return slave ? slave->src_master : 1;
}
-#define SRC_MASTER 0
-#define DST_MASTER 1
-
-static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
- struct dw_dma *dw = to_dw_dma(chan->device);
- struct dw_dma_slave *dws = chan->private;
- unsigned int m;
-
- if (master == SRC_MASTER)
- m = dwc_get_sms(dws);
- else
- m = dwc_get_dms(dws);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
+ unsigned char mmax = dw->nr_masters - 1;
- return min_t(unsigned int, dw->nr_masters - 1, m);
+ if (dwc->request_line == ~0) {
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
+ }
}
#define DWC_DEFAULT_CTLLO(_chan) ({ \
struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
bool _is_slave = is_slave_direction(_dwc->direction); \
- int _dms = dwc_get_master(_chan, DST_MASTER); \
- int _sms = dwc_get_master(_chan, SRC_MASTER); \
u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
DW_DMA_MSIZE_16; \
u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
| DWC_CTLL_SRC_MSIZE(_smsize) \
| DWC_CTLL_LLP_D_EN \
| DWC_CTLL_LLP_S_EN \
- | DWC_CTLL_DMS(_dms) \
- | DWC_CTLL_SMS(_sms)); \
+ | DWC_CTLL_DMS(_dwc->dst_master) \
+ | DWC_CTLL_SMS(_dwc->src_master)); \
})
/*
*/
#define NR_DESCS_PER_CHANNEL 64
-static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
-{
- struct dw_dma *dw = to_dw_dma(chan->device);
-
- return dw->data_width[dwc_get_master(chan, master)];
-}
-
/*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
if (dwc->initialized == true)
return;
- if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
- /* autoconfigure based on request line from DT */
- if (dwc->direction == DMA_MEM_TO_DEV)
- cfghi = DWC_CFGH_DST_PER(dwc->request_line);
- else if (dwc->direction == DMA_DEV_TO_MEM)
- cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
- } else if (dws) {
+ if (dws) {
/*
* We need controller-specific data to set up slave
* transfers.
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
} else {
if (dwc->direction == DMA_MEM_TO_DEV)
- cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
+ cfghi = DWC_CFGH_DST_PER(dwc->request_line);
else if (dwc->direction == DMA_DEV_TO_MEM)
- cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
+ cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
}
channel_writel(dwc, CFG_LO, cfglo);
(unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
- /* initial residue value */
+ /* Initial residue value */
dwc->residue = desc->total_len;
- /* check first descriptors addr */
+ /* Check first descriptors addr */
if (desc->txd.phys == llp) {
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- /* check first descriptors llp */
+ /* Check first descriptors llp */
if (desc->lli.llp == llp) {
/* This one is currently in progress */
dwc->residue -= dwc_get_sent(dwc);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
-/* called with dwc->lock held and all DMAC interrupts disabled */
+/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_err, u32 status_xfer)
{
dwc_chan_disable(dw, dwc);
- /* make sure DMA does not restart by loading a new list */
+ /* Make sure DMA does not restart by loading a new list */
channel_writel(dwc, LLP, 0);
channel_writel(dwc, CTL_LO, 0);
channel_writel(dwc, CTL_HI, 0);
size_t len, unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
struct dw_desc *first;
struct dw_desc *prev;
dwc->direction = DMA_MEM_TO_MEM;
- data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
- dwc_get_data_width(chan, DST_MASTER));
+ data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
+ dw->data_width[dwc->dst_master]);
src_width = dst_width = min_t(unsigned int, data_width,
dwc_fast_fls(src | dest | len));
unsigned long flags, void *context)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
struct dw_desc *prev;
struct dw_desc *first;
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
- data_width = dwc_get_data_width(chan, SRC_MASTER);
+ data_width = dw->data_width[dwc->src_master];
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- data_width = dwc_get_data_width(chan, DST_MASTER);
+ data_width = dw->data_width[dwc->dst_master];
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
*maxburst = 0;
}
-static inline void convert_slave_id(struct dw_dma_chan *dwc)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-
- dwc->dma_sconfig.slave_id -= dw->request_line_base;
-}
-
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->direction = sconfig->direction;
+ /* Take the request line from slave_id member */
+ if (dwc->request_line == ~0)
+ dwc->request_line = sconfig->slave_id;
+
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
- convert_slave_id(dwc);
return 0;
}
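Because the request line now defaults to the slave_id member when it was not filled in by the DT or ACPI translation, a platform-data style client can still convey it through the ordinary slave configuration. A sketch with hypothetical values (fifo_dma_addr and the request line number are assumptions):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_dma_addr,	/* device FIFO, assumed */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
		.slave_id	= 5,			/* hardware request line */
	};

	dmaengine_slave_config(chan, &cfg);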
static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
u32 cfglo = channel_readl(dwc, CFG_LO);
+ unsigned int count = 20; /* timeout iterations */
channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
- while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
- cpu_relax();
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
+ udelay(2);
dwc->paused = true;
}
* doesn't mean what you think it means), and status writeback.
*/
+ dwc_set_masters(dwc);
+
spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
dwc->initialized = false;
+ dwc->request_line = ~0;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-struct dw_dma_filter_args {
+/*----------------------------------------------------------------------*/
+
+struct dw_dma_of_filter_args {
struct dw_dma *dw;
unsigned int req;
unsigned int src;
unsigned int dst;
};
-static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(chan->device);
- struct dw_dma_filter_args *fargs = param;
- struct dw_dma_slave *dws = &dwc->slave;
+ struct dw_dma_of_filter_args *fargs = param;
- /* ensure the device matches our channel */
+ /* Ensure the device matches our channel */
if (chan->device != &fargs->dw->dma)
return false;
- dws->dma_dev = dw->dma.dev;
- dws->cfg_hi = ~0;
- dws->cfg_lo = ~0;
- dws->src_master = fargs->src;
- dws->dst_master = fargs->dst;
-
dwc->request_line = fargs->req;
-
- chan->private = dws;
+ dwc->src_master = fargs->src;
+ dwc->dst_master = fargs->dst;
return true;
}
-static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
{
struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_filter_args fargs = {
+ struct dw_dma_of_filter_args fargs = {
.dw = dw,
};
dma_cap_mask_t cap;
dma_cap_set(DMA_SLAVE, cap);
/* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
+ return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+}
+
+#ifdef CONFIG_ACPI
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct acpi_dma_spec *dma_spec = param;
+
+ if (chan->device->dev != dma_spec->dev ||
+ chan->chan_id != dma_spec->chan_id)
+ return false;
+
+ dwc->request_line = dma_spec->slave_id;
+ dwc->src_master = dwc_get_sms(NULL);
+ dwc->dst_master = dwc_get_dms(NULL);
+
+ return true;
+}
+
+static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ struct acpi_dma_filter_info *info;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dma_cap_zero(info->dma_cap);
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+ ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+ info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
}
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
/* --------------------- Cyclic DMA API extensions -------------------- */
spin_lock_irqsave(&dwc->lock, flags);
- /* assert channel is idle */
+ /* Assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: Attempted to start non-idle channel\n");
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- /* setup DMAC channel registers */
+ /* Setup DMAC channel registers */
channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
channel_writel(dwc, CTL_HI, 0);
last = desc;
}
- /* lets make a cyclic list */
+ /* Let's make a cyclic list */
last->lli.llp = cdesc->desc[0]->txd.phys;
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
static int dw_probe(struct platform_device *pdev)
{
- const struct platform_device_id *match;
struct dw_dma_platform_data *pdata;
struct resource *io;
struct dw_dma *dw;
dw->regs = regs;
- /* get hardware configuration parameters */
+ /* Get hardware configuration parameters */
if (autocfg) {
max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
memcpy(dw->data_width, pdata->data_width, 4);
}
- /* Get the base request line if set */
- match = platform_get_device_id(pdev);
- if (match)
- dw->request_line_base = (unsigned int)match->driver_data;
-
/* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << nr_channels) - 1;
- /* force dma off, just in case */
+ /* Force dma off, just in case */
dw_dma_off(dw);
- /* disable BLOCK interrupts as well */
+ /* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
platform_set_drvdata(pdev, dw);
- /* create a pool of consistent memory blocks for hardware descriptors */
+ /* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
channel_clear_bit(dw, CH_EN, dwc->mask);
dwc->direction = DMA_TRANS_NONE;
+ dwc->request_line = ~0;
- /* hardware configuration */
+ /* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
if (pdev->dev.of_node) {
err = of_dma_controller_register(pdev->dev.of_node,
- dw_dma_xlate, dw);
- if (err && err != -ENODEV)
+ dw_dma_of_xlate, dw);
+ if (err)
dev_err(&pdev->dev,
"could not register of_dma_controller\n");
}
+ if (ACPI_HANDLE(&pdev->dev))
+ dw_dma_acpi_controller_register(dw);
+
return 0;
}
};
#ifdef CONFIG_OF
-static const struct of_device_id dw_dma_id_table[] = {
+static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,dma-spear1340" },
{}
};
-MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif
-static const struct platform_device_id dw_dma_ids[] = {
- /* Name, Request Line Base */
- { "INTL9C60", (kernel_ulong_t)16 },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+ { "INTL9C60", 0 },
{ }
};
+#endif
static struct platform_driver dw_driver = {
.probe = dw_probe,
.driver = {
.name = "dw_dmac",
.pm = &dw_dev_pm_ops,
- .of_match_table = of_match_ptr(dw_dma_id_table),
+ .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
},
- .id_table = dw_dma_ids,
};
static int __init dw_init(void)
/* hardware configuration */
unsigned int block_size;
bool nollp;
+
+ /* custom slave configuration */
unsigned int request_line;
- struct dw_dma_slave slave;
+ unsigned char src_master;
+ unsigned char dst_master;
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
- unsigned int request_line_base;
struct dw_dma_chan chan[0];
};
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
- if (imxdmac->sg_list)
- kfree(imxdmac->sg_list);
+ kfree(imxdmac->sg_list);
imxdmac->sg_list = kcalloc(periods + 1,
sizeof(struct scatterlist), GFP_KERNEL);
return ret;
}
-static int __exit imxdma_remove(struct platform_device *pdev)
+static int imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
.name = "imx-dma",
},
.id_table = imx_dma_devtype,
- .remove = __exit_p(imxdma_remove),
+ .remove = imxdma_remove,
};
static int __init imxdma_module_init(void)
return ret;
}
-static int __exit sdma_remove(struct platform_device *pdev)
+static int sdma_remove(struct platform_device *pdev)
{
return -EBUSY;
}
.of_match_table = sdma_dt_ids,
},
.id_table = sdma_devtypes,
- .remove = __exit_p(sdma_remove),
+ .remove = sdma_remove,
};
static int __init sdma_module_init(void)
* ioat_dma_setup_interrupts - setup interrupt handler
* @device: ioat device
*/
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
struct ioat_chan_common *chan;
struct pci_dev *pdev = device->pdev;
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+ device->irq_mode = IOAT_MSIX;
goto done;
msix_single_vector:
pci_disable_msix(pdev);
goto msi;
}
+ device->irq_mode = IOAT_MSIX_SINGLE;
goto done;
msi:
pci_disable_msi(pdev);
goto intx;
}
+ device->irq_mode = IOAT_MSI;
goto done;
intx:
if (err)
goto err_no_irq;
+ device->irq_mode = IOAT_INTX;
done:
if (device->intr_quirk)
device->intr_quirk(device);
err_no_irq:
/* Disable all interrupt generation */
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+ device->irq_mode = IOAT_NOIRQ;
dev_err(dev, "no usable interrupts\n");
return err;
}
+EXPORT_SYMBOL(ioat_dma_setup_interrupts);
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
*/
#define NULL_DESC_BUFFER_SIZE 1
+enum ioat_irq_mode {
+ IOAT_NOIRQ = 0,
+ IOAT_MSIX,
+ IOAT_MSIX_SINGLE,
+ IOAT_MSI,
+ IOAT_INTX
+};
+
/**
* struct ioatdma_device - internal representation of a IOAT device
* @pdev: PCI-Express device
void __iomem *reg_base;
struct pci_pool *dma_pool;
struct pci_pool *completion_pool;
+#define MAX_SED_POOLS 5
+ struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
+ struct kmem_cache *sed_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
struct ioat_chan_common *idx[4];
struct dca_provider *dca;
+ enum ioat_irq_mode irq_mode;
+ u32 cap;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan);
u16 active;
};
+/**
+ * struct ioat_sed_ent - wrapper around super extended hardware descriptor
+ * @hw: hardware SED
+ * @sed_dma: dma address for the SED
+ * @list: list member
+ * @parent: point to the dma descriptor that's the parent
+ */
+struct ioat_sed_ent {
+ struct ioat_sed_raw_descriptor *hw;
+ dma_addr_t dma;
+ struct ioat_ring_ent *parent;
+ unsigned int hw_pool;
+};
+
static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
return container_of(c, struct ioat_chan_common, common);
struct device *dev = to_dev(chan);
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
- " ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+ " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
(unsigned long long) tx->phys,
(unsigned long long) hw->next, tx->cookie, tx->flags,
hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
return device->idx[index];
}
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
u64 status;
return status;
}
+#if BITS_PER_LONG == 64
+
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u64 status;
+
+ /* With IOAT v3.3 the status register is 64bit. */
+ if (ver >= IOAT_VER_3_3)
+ status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+ else
+ status = ioat_chansts_32(chan);
+
+ return status;
+}
+
+#else
+#define ioat_chansts ioat_chansts_32
+#endif
+
static inline void ioat_start(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
+int ioat_dma_setup_interrupts(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
#ifdef DEBUG
int id;
#endif
+ struct ioat_sed_ent *sed;
};
static inline struct ioat_ring_ent *
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+void ioat3_dma_remove(struct ioatdma_device *dev);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
/*
* Support routines for v3+ hardware
*/
-
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6 };
+
+/*
+ * technically sources 1 and 2 do not require SED, but the op will have
+ * at least 9 descriptors so that's irrelevant.
+ */
+static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1 };
+
+static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
return raw->field[pq_idx_to_field[idx]];
}
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ return raw->field[pq16_idx_to_field[idx]];
+}
+
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, u8 coef, int idx)
{
pq->coef[idx] = coef;
}
+static int sed_get_pq16_pool_idx(int src_cnt)
+{
+
+ return pq16_idx_to_sed[src_cnt];
+}
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+ return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+ is_hsw_ioat(pdev);
+}
+
+static bool is_bwd_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+ dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+ struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+ struct ioat_pq16a_descriptor *pq16 =
+ (struct ioat_pq16a_descriptor *)desc[1];
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+ if (idx < 8)
+ pq->coef[idx] = coef;
+ else
+ pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
+{
+ struct ioat_sed_ent *sed;
+ gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+ sed = kmem_cache_alloc(device->sed_pool, flags);
+ if (!sed)
+ return NULL;
+
+ sed->hw_pool = hw_pool;
+ sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
+ flags, &sed->dma);
+ if (!sed->hw) {
+ kmem_cache_free(device->sed_pool, sed);
+ return NULL;
+ }
+
+ return sed;
+}
+
+static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
+{
+ if (!sed)
+ return;
+
+ dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+ kmem_cache_free(device->sed_pool, sed);
+}
+
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
struct ioat_ring_ent *desc, int idx)
{
}
break;
}
+ case IOAT_OP_PQ_16S:
+ case IOAT_OP_PQ_VAL_16S: {
+ struct ioat_pq_descriptor *pq = desc->pq;
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ struct ioat_raw_descriptor *descs[4];
+ int i;
+
+ /* in the 'continue' case don't unmap the dests as sources */
+ if (dmaf_p_disabled_continue(flags))
+ src_cnt--;
+ else if (dmaf_continue(flags))
+ src_cnt -= 3;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ descs[0] = (struct ioat_raw_descriptor *)pq;
+ descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
+ descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
+ for (i = 0; i < src_cnt; i++) {
+ dma_addr_t src = pq16_get_src(descs, i);
+
+ ioat_unmap(pdev, src - offset, len,
+ PCI_DMA_TODEVICE, flags, 0);
+ }
+
+ /* the dests are sources in pq validate operations */
+ if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset,
+ len, PCI_DMA_TODEVICE,
+ flags, 0);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset,
+ len, PCI_DMA_TODEVICE,
+ flags, 0);
+ break;
+ }
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ }
+ break;
+ }
default:
dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
__func__, desc->hw->ctl_f.op);
return false;
}
+static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
+{
+ u64 phys_complete;
+ u64 completion;
+
+ completion = *chan->completion;
+ phys_complete = ioat_chansts_to_addr(completion);
+
+ dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+ (unsigned long long) phys_complete);
+
+ return phys_complete;
+}
+
+static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
+ u64 *phys_complete)
+{
+ *phys_complete = ioat3_get_current_completion(chan);
+ if (*phys_complete == chan->last_completion)
+ return false;
+
+ clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ return true;
+}
+
+static void
+desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
+{
+ struct ioat_dma_descriptor *hw = desc->hw;
+
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ {
+ struct ioat_pq_descriptor *pq = desc->pq;
+
+ /* check if there's error written */
+ if (!pq->dwbes_f.wbes)
+ return;
+
+ /* need to set a chanerr var for checking to clear later */
+
+ if (pq->dwbes_f.p_val_err)
+ *desc->result |= SUM_CHECK_P_RESULT;
+
+ if (pq->dwbes_f.q_val_err)
+ *desc->result |= SUM_CHECK_Q_RESULT;
+
+ return;
+ }
+ default:
+ return;
+ }
+}
+
/**
* __cleanup - reclaim used descriptors
* @ioat: channel (ring) to clean
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *desc;
bool seen_current = false;
int idx = ioat->tail, i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
+ /*
+ * At restart of the channel, the completion address and the
+ * channel status will be 0 due to starting a new chain. Since
+ * it's a new chain and the first descriptor "fails", there is
+ * nothing to clean up. We do not want to reap the entire submitted
+ * chain due to this 0 address value and then BUG.
+ */
+ if (!phys_complete)
+ return;
+
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
+
+ /* set err stat if we are using dwbes */
+ if (device->cap & IOAT_CAP_DWBES)
+ desc_get_errstat(ioat, desc);
+
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
BUG_ON(i + 1 >= active);
i++;
}
+
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat3_free_sed(device, desc->sed);
+ desc->sed = NULL;
+ }
}
smp_mb(); /* finish all descriptor reads before incrementing tail */
ioat->tail = idx + i;
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
+ u64 phys_complete;
spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
+
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
+
+ if (is_ioat_halted(*chan->completion)) {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ ioat3_eh(ioat);
+ }
+ }
+
spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
+ u64 phys_complete;
ioat2_quiesce(chan, 0);
- if (ioat_cleanup_preamble(chan, &phys_complete))
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
__ioat2_restart_chan(ioat);
}
+static void ioat3_eh(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct pci_dev *pdev = to_pdev(chan);
+ struct ioat_dma_descriptor *hw;
+ u64 phys_complete;
+ struct ioat_ring_ent *desc;
+ u32 err_handled = 0;
+ u32 chanerr_int;
+ u32 chanerr;
+
+ /* cleanup so tail points to descriptor that caused the error */
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+ dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
+ __func__, chanerr, chanerr_int);
+
+ desc = ioat2_get_ring_ent(ioat, ioat->tail);
+ hw = desc->hw;
+ dump_desc_dbg(ioat, desc);
+
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_XOR_VAL:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ break;
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+ *desc->result |= SUM_CHECK_Q_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+ }
+ break;
+ }
+
+ /* fault on unhandled error or spurious halt */
+ if (chanerr ^ err_handled || chanerr == 0) {
+ dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
+ __func__, chanerr, err_handled);
+ BUG();
+ }
+
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+ /* mark faulting descriptor as complete */
+ *chan->completion = desc->txd.phys;
+
+ spin_lock_bh(&ioat->prep_lock);
+ ioat3_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+}
+
static void check_active(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
int i;
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
- " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
+ " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
desc_id(desc), (unsigned long long) desc->txd.phys,
(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+ dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
+ struct ioat_ring_ent *desc)
+{
+ struct device *dev = to_dev(&ioat->base);
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_raw_descriptor *descs[] = { (void *)pq,
+ (void *)pq,
+ (void *)pq };
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ int i;
+
+ if (desc->sed) {
+ descs[1] = (void *)desc->sed->hw;
+ descs[2] = (void *)desc->sed->hw + 64;
+ }
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+ " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
+ desc_id(desc), (unsigned long long) desc->txd.phys,
+ (unsigned long long) pq->next,
+ desc->txd.flags, pq->size, pq->ctl,
+ pq->ctl_f.op, pq->ctl_f.int_en,
+ pq->ctl_f.compl_write,
+ pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+ pq->ctl_f.src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+ (unsigned long long) pq16_get_src(descs, i),
+ pq->coef[i]);
+ }
+ dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+ dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *compl_desc;
struct ioat_ring_ent *desc;
struct ioat_ring_ent *ext;
u32 offset = 0;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
int i, s, idx, with_ext, num_descs;
+ int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
* order.
*/
if (likely(num_descs) &&
- ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
idx = ioat->head;
else
return NULL;
pq->q_addr = dst[1] + offset;
pq->ctl = 0;
pq->ctl_f.op = op;
+ /* we turn on descriptor write back error status */
+ if (device->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
pq->ctl_f.src_cnt = src_cnt_to_hw(s);
pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
dump_pq_desc_dbg(ioat, desc, ext);
- /* completion descriptor carries interrupt bit */
- compl_desc = ioat2_get_ring_ent(ioat, idx + i);
- compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
- hw = compl_desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.compl_write = 1;
- hw->size = NULL_DESC_BUFFER_SIZE;
- dump_desc_dbg(ioat, compl_desc);
+ if (!cb32) {
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+ compl_desc = desc;
+ } else {
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat, compl_desc);
+ }
+
/* we leave the channel locked to ensure in order submission */
return &compl_desc->txd;
}
+static struct dma_async_tx_descriptor *
+__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+ const dma_addr_t *dst, const dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
+ struct ioat_ring_ent *desc;
+ size_t total_len = len;
+ struct ioat_pq_descriptor *pq;
+ u32 offset = 0;
+ u8 op;
+ int i, s, idx, num_descs;
+
+ /* this function only handles src_cnt 9 - 16 */
+ BUG_ON(src_cnt < 9);
+
+ /* this function is only called with 9-16 sources */
+ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+ dev_dbg(to_dev(chan), "%s\n", __func__);
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+
+ /*
+ * 16 source pq is only available on cb3.3 and has no completion
+ * write hw bug.
+ */
+ if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
+ else
+ return NULL;
+
+ i = 0;
+
+ do {
+ struct ioat_raw_descriptor *descs[4];
+ size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ pq = desc->pq;
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+
+ desc->sed = ioat3_alloc_sed(device,
+ sed_get_pq16_pool_idx(src_cnt));
+ if (!desc->sed) {
+ dev_err(to_dev(chan),
+ "%s: no free sed entries\n", __func__);
+ return NULL;
+ }
+
+ pq->sed_addr = desc->sed->dma;
+ desc->sed->parent = desc;
+
+ descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+ descs[2] = (void *)descs[1] + 64;
+
+ for (s = 0; s < src_cnt; s++)
+ pq16_set_src(descs, src[s], offset, scf[s], s);
+
+ /* see the comment for dma_maxpq in include/linux/dmaengine.h */
+ if (dmaf_p_disabled_continue(flags))
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ else if (dmaf_continue(flags)) {
+ pq16_set_src(descs, dst[0], offset, 0, s++);
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ pq16_set_src(descs, dst[1], offset, 0, s++);
+ }
+
+ pq->size = xfer_size;
+ pq->p_addr = dst[0] + offset;
+ pq->q_addr = dst[1] + offset;
+ pq->ctl = 0;
+ pq->ctl_f.op = op;
+ pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+ /* we turn on descriptor write back error status */
+ if (device->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
+ pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ } while (++i < num_descs);
+
+ /* last pq descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+ /* with cb3.3 we should be able to do completion w/o a null desc */
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+
+ dump_pq16_desc_dbg(ioat, desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
+ struct dma_device *dma = chan->device;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
- return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
- single_source_coef, len, flags);
- } else
- return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
- len, flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+ 2, single_source_coef, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
+ single_source_coef, len, flags);
+
+ } else {
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags);
+ }
}
struct dma_async_tx_descriptor *
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
*/
*pqres = 0;
- return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
- flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
- return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
- flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags);
}
struct dma_async_tx_descriptor *
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
- return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
- len, flags);
+
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags);
}
static struct dma_async_tx_descriptor *
return 0;
}
+static int ioat3_irq_reinit(struct ioatdma_device *device)
+{
+ int msixcnt = device->common.chancnt;
+ struct pci_dev *pdev = device->pdev;
+ int i;
+ struct msix_entry *msix;
+ struct ioat_chan_common *chan;
+ int err = 0;
+
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+
+ for (i = 0; i < msixcnt; i++) {
+ msix = &device->msix_entries[i];
+ chan = ioat_chan_by_index(device, i);
+ devm_free_irq(&pdev->dev, msix->vector, chan);
+ }
+
+ pci_disable_msix(pdev);
+ break;
+
+ case IOAT_MSIX_SINGLE:
+ msix = &device->msix_entries[0];
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, msix->vector, chan);
+ pci_disable_msix(pdev);
+ break;
+
+ case IOAT_MSI:
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, pdev->irq, chan);
+ pci_disable_msi(pdev);
+ break;
+
+ case IOAT_INTX:
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, pdev->irq, chan);
+ break;
+
+ default:
+ return 0;
+ }
+
+ device->irq_mode = IOAT_NOIRQ;
+
+ err = ioat_dma_setup_interrupts(device);
+
+ return err;
+}
+
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
/* throw away whatever the channel was doing and get it
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- /* clear any pending errors */
- err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (device->version < IOAT_VER_3_3) {
+ /* clear any pending errors */
+ err = pci_read_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (err) {
+ dev_err(&pdev->dev,
+ "channel error register unreachable\n");
+ return err;
+ }
+ pci_write_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+ pci_write_config_dword(pdev,
+ IOAT_PCI_DMAUNCERRSTS_OFFSET,
+ 0x10);
+ }
+ }
+
+ err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
if (err) {
- dev_err(&pdev->dev, "channel error register unreachable\n");
+ dev_err(&pdev->dev, "Failed to reset!\n");
return err;
}
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+ if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+ err = ioat3_irq_reinit(device);
- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+ return err;
}
-static bool is_jf_ioat(struct pci_dev *pdev)
+static void ioat3_intr_quirk(struct ioatdma_device *device)
{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
- return true;
- default:
- return false;
- }
-}
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioat_chan_common *chan;
+ u32 errmask;
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
- return true;
- default:
- return false;
- }
-}
+ dma = &device->common;
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
- return true;
- default:
- return false;
+ /*
+ * if we have descriptor write back error status, we mask the
+ * error interrupts
+ */
+ if (device->cap & IOAT_CAP_DWBES) {
+ list_for_each_entry(c, &dma->channels, device_node) {
+ chan = to_chan_common(c);
+ errmask = readl(chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+ IOAT_CHANERR_XOR_Q_ERR;
+ writel(errmask, chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ }
}
-
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
- u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
+ device->intr_quirk = ioat3_intr_quirk;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
+ if (is_xeon_cb32(pdev))
dma->copy_align = 6;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
- cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+ device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+ if (is_bwd_noraid(pdev))
+ device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
- if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
- cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+ if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+ device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
- if (cap & IOAT_CAP_XOR) {
+ if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
dma->xor_align = 6;
dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
}
- if (cap & IOAT_CAP_PQ) {
+
+ if (device->cap & IOAT_CAP_PQ) {
is_raid_device = true;
- dma_set_maxpq(dma, 8, 0);
- dma->pq_align = 6;
- dma_cap_set(DMA_PQ, dma->cap_mask);
dma->device_prep_dma_pq = ioat3_prep_pq;
-
- dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+ dma_cap_set(DMA_PQ, dma->cap_mask);
+ dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
- if (!(cap & IOAT_CAP_XOR)) {
- dma->max_xor = 8;
- dma->xor_align = 6;
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ dma_set_maxpq(dma, 16, 0);
+ dma->pq_align = 0;
+ } else {
+ dma_set_maxpq(dma, 8, 0);
+ if (is_xeon_cb32(pdev))
+ dma->pq_align = 6;
+ else
+ dma->pq_align = 0;
+ }
- dma_cap_set(DMA_XOR, dma->cap_mask);
+ if (!(device->cap & IOAT_CAP_XOR)) {
dma->device_prep_dma_xor = ioat3_prep_pqxor;
-
- dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ dma->max_xor = 16;
+ dma->xor_align = 0;
+ } else {
+ dma->max_xor = 8;
+ if (is_xeon_cb32(pdev))
+ dma->xor_align = 6;
+ else
+ dma->xor_align = 0;
+ }
}
}
- if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+
+ if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
dma_cap_set(DMA_MEMSET, dma->cap_mask);
dma->device_prep_dma_memset = ioat3_prep_memset_lock;
}
- if (is_raid_device) {
- dma->device_tx_status = ioat3_tx_status;
- device->cleanup_fn = ioat3_cleanup_event;
- device->timer_fn = ioat3_timer_event;
- } else {
- dma->device_tx_status = ioat_dma_tx_status;
- device->cleanup_fn = ioat2_cleanup_event;
- device->timer_fn = ioat2_timer_event;
+ dma->device_tx_status = ioat3_tx_status;
+ device->cleanup_fn = ioat3_cleanup_event;
+ device->timer_fn = ioat3_timer_event;
+
+ if (is_xeon_cb32(pdev)) {
+ dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = NULL;
+
+ dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+ dma->device_prep_dma_pq_val = NULL;
}
- #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- #endif
+ /* starting with CB3.3 super extended descriptors are supported */
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ char pool_name[14];
+ int i;
+
+ /* allocate sw descriptor pool for SED */
+ device->sed_pool = kmem_cache_create("ioat_sed",
+ sizeof(struct ioat_sed_ent), 0, 0, NULL);
+ if (!device->sed_pool)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_SED_POOLS; i++) {
+ snprintf(pool_name, 14, "ioat_hw%d_sed", i);
- #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
- #endif
+ /* allocate SED DMA pool */
+ device->sed_hw_pool[i] = dma_pool_create(pool_name,
+ &pdev->dev,
+ SED_SIZE * (i + 1), 64, 0);
+ if (!device->sed_hw_pool[i])
+ goto sed_pool_cleanup;
+
+ }
+ }
err = ioat_probe(device);
if (err)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
+
+sed_pool_cleanup:
+ if (device->sed_pool) {
+ int i;
+ kmem_cache_destroy(device->sed_pool);
+
+ for (i = 0; i < MAX_SED_POOLS; i++)
+ if (device->sed_hw_pool[i])
+ dma_pool_destroy(device->sed_hw_pool[i]);
+ }
+
+ return -ENOMEM;
+}
+
+void ioat3_dma_remove(struct ioatdma_device *device)
+{
+ if (device->sed_pool) {
+ int i;
+ kmem_cache_destroy(device->sed_pool);
+
+ for (i = 0; i < MAX_SED_POOLS; i++)
+ if (device->sed_hw_pool[i])
+ dma_pool_destroy(device->sed_hw_pool[i]);
+ }
}
#define IOAT_PCI_DID_SCNB 0x65FF
#define IOAT_PCI_DID_SNB 0x402F
-#define IOAT_VER_1_2 0x12 /* Version 1.2 */
-#define IOAT_VER_2_0 0x20 /* Version 2.0 */
-#define IOAT_VER_3_0 0x30 /* Version 3.0 */
-#define IOAT_VER_3_2 0x32 /* Version 3.2 */
-
#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f
+
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53
+
+#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_VER_2_0 0x20 /* Version 2.0 */
+#define IOAT_VER_3_0 0x30 /* Version 3.0 */
+#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#define IOAT_VER_3_3 0x33 /* Version 3.3 */
+
+
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
};
struct ioat_pq_descriptor {
- uint32_t size;
+ union {
+ uint32_t size;
+ uint32_t dwbes;
+ struct {
+ unsigned int rsvd:25;
+ unsigned int p_val_err:1;
+ unsigned int q_val_err:1;
+ unsigned int rsvd1:4;
+ unsigned int wbes:1;
+ } dwbes_f;
+ };
union {
uint32_t ctl;
struct {
unsigned int hint:1;
unsigned int p_disable:1;
unsigned int q_disable:1;
- unsigned int rsvd:11;
+ unsigned int rsvd2:2;
+ unsigned int wb_en:1;
+ unsigned int prl_en:1;
+ unsigned int rsvd3:7;
#define IOAT_OP_PQ 0x89
#define IOAT_OP_PQ_VAL 0x8a
+ #define IOAT_OP_PQ_16S 0xa0
+ #define IOAT_OP_PQ_VAL_16S 0xa1
unsigned int op:8;
} ctl_f;
};
uint64_t p_addr;
uint64_t next;
uint64_t src_addr2;
- uint64_t src_addr3;
+ union {
+ uint64_t src_addr3;
+ uint64_t sed_addr;
+ };
uint8_t coef[8];
uint64_t q_addr;
};
struct ioat_raw_descriptor {
uint64_t field[8];
};
+
+struct ioat_pq16a_descriptor {
+ uint8_t coef[8];
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t src_addr9;
+};
+
+struct ioat_pq16b_descriptor {
+ uint64_t src_addr10;
+ uint64_t src_addr11;
+ uint64_t src_addr12;
+ uint64_t src_addr13;
+ uint64_t src_addr14;
+ uint64_t src_addr15;
+ uint64_t src_addr16;
+ uint64_t rsvd;
+};
+
+union ioat_sed_pq_descriptor {
+ struct ioat_pq16a_descriptor a;
+ struct ioat_pq16b_descriptor b;
+};
+
+#define SED_SIZE 64
+
+struct ioat_sed_raw_descriptor {
+ uint64_t a[8];
+ uint64_t b[8];
+ uint64_t c[8];
+};
+
#endif
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+ /* I/OAT v3.3 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
if (!device)
return;
+ if (device->version >= IOAT_VER_3_0)
+ ioat3_dma_remove(device);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
#define IOAT_CAP_APIC 0x00000080
#define IOAT_CAP_XOR 0x00000100
#define IOAT_CAP_PQ 0x00000200
+#define IOAT_CAP_DWBES 0x00002000
+#define IOAT_CAP_RAID16SS 0x00020000
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+ IOAT_CHANCTRL_ERR_INT_EN |\
+ IOAT_CHANCTRL_ERR_COMPLETION_EN |\
IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
return dma_async_device_register(&idmac->dma);
}
-static void __exit ipu_idmac_exit(struct ipu *ipu)
+static void ipu_idmac_exit(struct ipu *ipu)
{
int i;
struct idmac *idmac = &ipu->idmac;
return ret;
}
-static int __exit ipu_remove(struct platform_device *pdev)
+static int ipu_remove(struct platform_device *pdev)
{
struct ipu *ipu = platform_get_drvdata(pdev);
.name = "ipu-core",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ipu_remove),
+ .remove = ipu_remove,
};
static int __init ipu_init(void)
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/rculist.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_dma.h>
static LIST_HEAD(of_dma_list);
-static DEFINE_SPINLOCK(of_dma_lock);
+static DEFINE_MUTEX(of_dma_lock);
/**
- * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * of_dma_find_controller - Get a DMA controller in DT DMA helpers list
* @dma_spec: pointer to DMA specifier as found in the device tree
*
* Finds a DMA controller with matching device node and number for dma cells
- * in a list of registered DMA controllers. If a match is found the use_count
- * variable is increased and a valid pointer to the DMA data stored is retuned.
- * A NULL pointer is returned if no match is found.
+ * in a list of registered DMA controllers. If a match is found, a valid pointer
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
+ * found.
*/
-static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
{
struct of_dma *ofdma;
- spin_lock(&of_dma_lock);
-
- if (list_empty(&of_dma_list)) {
- spin_unlock(&of_dma_lock);
- return NULL;
- }
-
list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
if ((ofdma->of_node == dma_spec->np) &&
- (ofdma->of_dma_nbcells == dma_spec->args_count)) {
- ofdma->use_count++;
- spin_unlock(&of_dma_lock);
+ (ofdma->of_dma_nbcells == dma_spec->args_count))
return ofdma;
- }
-
- spin_unlock(&of_dma_lock);
pr_debug("%s: can't find DMA controller %s\n", __func__,
dma_spec->np->full_name);
return NULL;
}
-/**
- * of_dma_put_controller - Decrement use count for a registered DMA controller
- * @of_dma: pointer to DMA controller data
- *
- * Decrements the use_count variable in the DMA data structure. This function
- * should be called only when a valid pointer is returned from
- * of_dma_get_controller() and no further accesses to data referenced by that
- * pointer are needed.
- */
-static void of_dma_put_controller(struct of_dma *ofdma)
-{
- spin_lock(&of_dma_lock);
- ofdma->use_count--;
- spin_unlock(&of_dma_lock);
-}
-
/**
* of_dma_controller_register - Register a DMA controller to DT DMA helpers
* @np: device node of DMA controller
{
struct of_dma *ofdma;
int nbcells;
+ const __be32 *prop;
if (!np || !of_dma_xlate) {
pr_err("%s: not enough information provided\n", __func__);
if (!ofdma)
return -ENOMEM;
- nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
- if (!nbcells) {
+ prop = of_get_property(np, "#dma-cells", NULL);
+ if (prop)
+ nbcells = be32_to_cpup(prop);
+
+ if (!prop || !nbcells) {
pr_err("%s: #dma-cells property is missing or invalid\n",
__func__);
kfree(ofdma);
ofdma->of_dma_nbcells = nbcells;
ofdma->of_dma_xlate = of_dma_xlate;
ofdma->of_dma_data = data;
- ofdma->use_count = 0;
/* Now queue of_dma controller structure in list */
- spin_lock(&of_dma_lock);
+ mutex_lock(&of_dma_lock);
list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
- spin_unlock(&of_dma_lock);
+ mutex_unlock(&of_dma_lock);
return 0;
}
*
* Memory allocated by of_dma_controller_register() is freed here.
*/
-int of_dma_controller_free(struct device_node *np)
+void of_dma_controller_free(struct device_node *np)
{
struct of_dma *ofdma;
- spin_lock(&of_dma_lock);
-
- if (list_empty(&of_dma_list)) {
- spin_unlock(&of_dma_lock);
- return -ENODEV;
- }
+ mutex_lock(&of_dma_lock);
list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
if (ofdma->of_node == np) {
- if (ofdma->use_count) {
- spin_unlock(&of_dma_lock);
- return -EBUSY;
- }
-
list_del(&ofdma->of_dma_controllers);
- spin_unlock(&of_dma_lock);
kfree(ofdma);
- return 0;
+ break;
}
- spin_unlock(&of_dma_lock);
- return -ENODEV;
+ mutex_unlock(&of_dma_lock);
}
EXPORT_SYMBOL_GPL(of_dma_controller_free);
* specifiers, matches the name provided. Returns 0 if the name matches and
* a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
*/
-static int of_dma_match_channel(struct device_node *np, char *name, int index,
- struct of_phandle_args *dma_spec)
+static int of_dma_match_channel(struct device_node *np, const char *name,
+ int index, struct of_phandle_args *dma_spec)
{
const char *s;
* Returns pointer to appropriate dma channel on success or NULL on error.
*/
struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
- char *name)
+ const char *name)
{
struct of_phandle_args dma_spec;
struct of_dma *ofdma;
if (of_dma_match_channel(np, name, i, &dma_spec))
continue;
- ofdma = of_dma_get_controller(&dma_spec);
-
- if (!ofdma)
- continue;
+ mutex_lock(&of_dma_lock);
+ ofdma = of_dma_find_controller(&dma_spec);
- chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+ if (ofdma)
+ chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+ else
+ chan = NULL;
- of_dma_put_controller(ofdma);
+ mutex_unlock(&of_dma_lock);
of_node_put(dma_spec.np);
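
On the consumer side these DT DMA helpers are reached through dma_request_slave_channel().
A minimal sketch of a client driver requesting the channel whose dma-names entry is "rx"
(the "rx" name and the error handling choice are assumptions for illustration only):

	/* Illustrative client usage; "rx" is assumed to match an entry in dma-names */
	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		return -EPROBE_DEFER;	/* or fall back to a PIO path */

	/* ... set up slave config, prepare and submit descriptors ... */

	dma_release_channel(chan);

The lookup walks of_dma_request_slave_channel(), which now takes the of_dma_lock mutex
around of_dma_find_controller() before calling the controller's xlate callback.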
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include "virt-dma.h"
[OMAP_DMA_DATA_TYPE_S32] = 4,
};
+static struct of_dma_filter_info omap_dma_info = {
+ .filter_fn = omap_dma_filter_fn,
+};
+
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
return container_of(d, struct omap_dmadev, ddev);
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
rc);
omap_dma_free(od);
- } else {
- platform_set_drvdata(pdev, od);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, od);
+
+ if (pdev->dev.of_node) {
+ omap_dma_info.dma_cap = od->ddev.cap_mask;
+
+ /* Device-tree DMA controller registration */
+ rc = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_simple_xlate, &omap_dma_info);
+ if (rc) {
+ pr_warn("OMAP-DMA: failed to register DMA controller\n");
+ dma_async_device_unregister(&od->ddev);
+ omap_dma_free(od);
+ }
}
dev_info(&pdev->dev, "OMAP DMA engine driver\n");
{
struct omap_dmadev *od = platform_get_drvdata(pdev);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
dma_async_device_unregister(&od->ddev);
omap_dma_free(od);
return 0;
}
+static const struct of_device_id omap_dma_match[] = {
+ { .compatible = "ti,omap2420-sdma", },
+ { .compatible = "ti,omap2430-sdma", },
+ { .compatible = "ti,omap3430-sdma", },
+ { .compatible = "ti,omap3630-sdma", },
+ { .compatible = "ti,omap4430-sdma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_dma_match);
+
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
.remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_dma_match),
},
};
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
if (!ret) {
- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
+ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
if (ret) {
spin_lock(&pd_chan->lock);
pd_chan->descs_allocated++;
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/err.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
/* If already submitted */
if (desc->status == BUSY)
- break;
+ continue;
ret = pl330_submit_req(pch->pl330_chid,
&desc->req);
if (!ret) {
desc->status = BUSY;
- break;
} else if (ret == -EAGAIN) {
/* QFull or DMAC Dying */
break;
pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- pi->base = devm_request_and_ioremap(&adev->dev, res);
- if (!pi->base)
- return -ENXIO;
+ pi->base = devm_ioremap_resource(&adev->dev, res);
+ if (IS_ERR(pi->base))
+ return PTR_ERR(pi->base);
amba_set_drvdata(adev, pdmac);
--- /dev/null
+#
+# DMA engine configuration for sh
+#
+
+config SH_DMAE_BASE
+ bool "Renesas SuperH DMA Engine support"
+ depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+ depends on !SH_DMA_API
+ default y
+ select DMA_ENGINE
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
+config SH_DMAE
+ tristate "Renesas SuperH DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas SuperH DMA controllers.
+
+config SUDMAC
+ tristate "Renesas SUDMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas SUDMAC controllers.
-obj-$(CONFIG_SH_DMAE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
--- /dev/null
+/*
+ * Renesas SUDMAC support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * based on drivers/dma/sh/shdma.c:
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/sudmac.h>
+
+struct sudmac_chan {
+ struct shdma_chan shdma_chan;
+ void __iomem *base;
+ char dev_id[16]; /* unique name per DMAC of channel */
+
+ u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */
+ u32 cfg;
+ u32 dint_end_bit;
+};
+
+struct sudmac_device {
+ struct shdma_dev shdma_dev;
+ struct sudmac_pdata *pdata;
+ void __iomem *chan_reg;
+};
+
+struct sudmac_regs {
+ u32 base_addr;
+ u32 base_byte_count;
+};
+
+struct sudmac_desc {
+ struct sudmac_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
+#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct sudmac_device, shdma_dev.dma_dev)
+
+/* SUDMAC register */
+#define SUDMAC_CH0CFG 0x00
+#define SUDMAC_CH0BA 0x10
+#define SUDMAC_CH0BBC 0x18
+#define SUDMAC_CH0CA 0x20
+#define SUDMAC_CH0CBC 0x28
+#define SUDMAC_CH0DEN 0x30
+#define SUDMAC_DSTSCLR 0x38
+#define SUDMAC_DBUFCTRL 0x3C
+#define SUDMAC_DINTCTRL 0x40
+#define SUDMAC_DINTSTS 0x44
+#define SUDMAC_DINTSTSCLR 0x48
+#define SUDMAC_CH0SHCTRL 0x50
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */
+#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */
+#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
+#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */
+
+#define SUDMAC_DRV_NAME "sudmac"
+
+static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
+{
+ iowrite32(data, sc->base + reg);
+}
+
+static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
+{
+ return ioread32(sc->base + reg);
+}
+
+static bool sudmac_is_busy(struct sudmac_chan *sc)
+{
+ u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
+
+ if (den)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
+static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
+ struct shdma_desc *sdesc)
+{
+ sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
+ sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
+ sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
+}
+
+static void sudmac_start(struct sudmac_chan *sc)
+{
+ u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+ sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
+ sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
+}
+
+static void sudmac_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+
+ sudmac_set_reg(sc, &sd->hw, sdesc);
+ sudmac_start(sc);
+}
+
+static bool sudmac_channel_busy(struct shdma_chan *schan)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+
+ return sudmac_is_busy(sc);
+}
+
+static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static const struct sudmac_slave_config *sudmac_find_slave(
+ struct sudmac_chan *sc, int slave_id)
+{
+ struct sudmac_device *sdev = to_sdev(sc);
+ struct sudmac_pdata *pdata = sdev->pdata;
+ const struct sudmac_slave_config *cfg;
+ int i;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
+
+ if (!cfg)
+ return -ENODEV;
+
+ return 0;
+}
+
+static inline void sudmac_dma_halt(struct sudmac_chan *sc)
+{
+ u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+ sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
+ sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
+ sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
+}
+
+static int sudmac_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+
+ dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
+ __func__, src, dst, *len);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ if (dst)
+ sd->hw.base_addr = dst;
+ else if (src)
+ sd->hw.base_addr = src;
+ sd->hw.base_byte_count = *len;
+
+ return 0;
+}
+
+static void sudmac_halt(struct shdma_chan *schan)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+
+ sudmac_dma_halt(sc);
+}
+
+static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);
+
+ if (!(dintsts & sc->dint_end_bit))
+ return false;
+
+ /* DMA stop */
+ sudmac_dma_halt(sc);
+
+ return true;
+}
+
+static size_t sudmac_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+ u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);
+
+ return sd->hw.base_byte_count - current_byte_count;
+}
+
+static bool sudmac_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sudmac_chan *sc = to_chan(schan);
+ struct sudmac_desc *sd = to_desc(sdesc);
+ u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);
+
+ return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
+}
+
+static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
+ unsigned long flags)
+{
+ struct shdma_dev *sdev = &su_dev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sudmac_chan *sc;
+ struct shdma_chan *schan;
+ int err;
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
+ if (!sc) {
+ dev_err(sdev->dma_dev.dev,
+ "No free memory for allocating dma channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &sc->shdma_chan;
+ schan->max_xfer_len = 64 * 1024 * 1024 - 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sc->base = su_dev->chan_reg;
+
+ /* get platform_data */
+ sc->offset = su_dev->pdata->channel->offset;
+ if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
+ sc->cfg |= SUDMAC_SENDBUFM;
+ if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
+ sc->cfg |= SUDMAC_RCVENDM;
+ sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;
+
+ if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
+ sc->dint_end_bit |= SUDMAC_CH0ENDE;
+ if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
+ sc->dint_end_bit |= SUDMAC_CH1ENDE;
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
+ pdev->id, id);
+ else
+ snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sc->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq failed %d\n", id, err);
+ goto err_no_irq;
+ }
+
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ return err;
+}
+
+static void sudmac_chan_remove(struct sudmac_device *su_dev)
+{
+ struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
+ struct sudmac_chan *sc = to_chan(schan);
+
+ BUG_ON(!schan);
+
+ shdma_free_irq(&sc->shdma_chan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
+{
+ /* SUDMAC doesn't need the address */
+ return 0;
+}
+
+static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
+{
+ return &((struct sudmac_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sudmac_shdma_ops = {
+ .desc_completed = sudmac_desc_completed,
+ .halt_channel = sudmac_halt,
+ .channel_busy = sudmac_channel_busy,
+ .slave_addr = sudmac_slave_addr,
+ .desc_setup = sudmac_desc_setup,
+ .set_slave = sudmac_set_slave,
+ .setup_xfer = sudmac_setup_xfer,
+ .start_xfer = sudmac_start_xfer,
+ .embedded_desc = sudmac_embedded_desc,
+ .chan_irq = sudmac_chan_irq,
+ .get_partial = sudmac_get_partial,
+};
+
+static int sudmac_probe(struct platform_device *pdev)
+{
+ struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ int err, i;
+ struct sudmac_device *su_dev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *irq_res;
+
+ /* get platform data */
+ if (!pdata)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !irq_res)
+ return -ENODEV;
+
+ err = -ENOMEM;
+ su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
+ GFP_KERNEL);
+ if (!su_dev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return err;
+ }
+
+ dma_dev = &su_dev->shdma_dev.dma_dev;
+
+ su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
+ if (!su_dev->chan_reg)
+ return err;
+
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ su_dev->shdma_dev.ops = &sudmac_shdma_ops;
+ su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
+ err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
+ if (err < 0)
+ return err;
+
+ /* platform data */
+ su_dev->pdata = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, su_dev);
+
+ /* Create DMA Channel */
+ for (i = 0; i < pdata->channel_num; i++) {
+ err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto chan_probe_err;
+
+ return err;
+
+chan_probe_err:
+ sudmac_chan_remove(su_dev);
+
+ platform_set_drvdata(pdev, NULL);
+ shdma_cleanup(&su_dev->shdma_dev);
+
+ return err;
+}
+
+static int sudmac_remove(struct platform_device *pdev)
+{
+ struct sudmac_device *su_dev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+ sudmac_chan_remove(su_dev);
+ shdma_cleanup(&su_dev->shdma_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sudmac_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = SUDMAC_DRV_NAME,
+ },
+ .probe = sudmac_probe,
+ .remove = sudmac_remove,
+};
+module_platform_driver(sudmac_driver);
+
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SUDMAC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/clk.h>
#include <linux/sirfsoc_dma.h>
#include "dmaengine.h"
struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
void __iomem *base;
int irq;
+ struct clk *clk;
bool is_marco;
};
return -EINVAL;
}
+ sdma->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sdma->clk)) {
+ dev_err(dev, "failed to get a clock.\n");
+ return PTR_ERR(sdma->clk);
+ }
+
ret = of_address_to_resource(dn, 0, &res);
if (ret) {
dev_err(dev, "Error parsing memory region!\n");
tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+ clk_prepare_enable(sdma->clk);
+
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
ret = dma_async_device_register(dma);
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ clk_disable_unprepare(sdma->clk);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
},
};
-module_platform_driver(sirfsoc_dma_driver);
+static __init int sirfsoc_dma_init(void)
+{
+ return platform_driver_register(&sirfsoc_dma_driver);
+}
+
+static void __exit sirfsoc_dma_exit(void)
+{
+ platform_driver_unregister(&sirfsoc_dma_driver);
+}
+
+subsys_initcall(sirfsoc_dma_init);
+module_exit(sirfsoc_dma_exit);
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
"Barry Song <baohua.song@csr.com>");
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>
/* Channel-slave specific configuration */
struct dma_slave_config dma_sconfig;
+ struct tegra_dma_channel_regs channel_reg;
};
/* tegra_dma: Tegra DMA specific information */
.support_channel_pause = false,
};
-#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
.nr_channels = 32,
},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
-#endif
static int tegra_dma_probe(struct platform_device *pdev)
{
int ret;
int i;
const struct tegra_dma_chip_data *cdata = NULL;
+ const struct of_device_id *match;
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_dma_of_match),
- &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- cdata = match->data;
- } else {
- /* If no device tree then fallback to tegra20 */
- cdata = &tegra20_dma_chip_data;
+ match = of_match_device(tegra_dma_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
}
+ cdata = match->data;
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int tegra_dma_pm_suspend(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ /* Enable clock before accessing register */
+ ret = tegra_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+ struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+
+ ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
+ ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
+ ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
+ ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
+ ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
+ }
+
+ /* Disable clock */
+ tegra_dma_runtime_suspend(dev);
+ return 0;
+}
+
+static int tegra_dma_pm_resume(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ int i;
+ int ret;
+
+ /* Enable clock before accessing register */
+ ret = tegra_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+ struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+ (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
+ }
+
+ /* Disable clock */
+ tegra_dma_runtime_suspend(dev);
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
.runtime_suspend = tegra_dma_runtime_suspend,
.runtime_resume = tegra_dma_runtime_resume,
#endif
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};
static struct platform_driver tegra_dmac_driver = {
.name = "tegra-apbdma",
.owner = THIS_MODULE,
.pm = &tegra_dma_dev_pm_ops,
- .of_match_table = of_match_ptr(tegra_dma_of_match),
+ .of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
.remove = tegra_dma_remove,
.owner = THIS_MODULE,
},
.probe = td_probe,
- .remove = __exit_p(td_remove),
+ .remove = td_remove,
};
module_platform_driver(td_driver);
return 0;
}
-static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
+static int txx9dmac_chan_remove(struct platform_device *pdev)
{
struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
return 0;
}
-static int __exit txx9dmac_remove(struct platform_device *pdev)
+static int txx9dmac_remove(struct platform_device *pdev)
{
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
};
static struct platform_driver txx9dmac_chan_driver = {
- .remove = __exit_p(txx9dmac_chan_remove),
+ .remove = txx9dmac_chan_remove,
.driver = {
.name = "txx9dmac-chan",
},
};
static struct platform_driver txx9dmac_driver = {
- .remove = __exit_p(txx9dmac_remove),
+ .remove = txx9dmac_remove,
.shutdown = txx9dmac_shutdown,
.driver = {
.name = "txx9dmac",
/*
* various constants for Memory Controllers
*/
-static const char *mem_types[] = {
+static const char * const mem_types[] = {
[MEM_EMPTY] = "Empty",
[MEM_RESERVED] = "Reserved",
[MEM_UNKNOWN] = "Unknown",
[MEM_RDDR3] = "Registered-DDR3"
};
-static const char *dev_types[] = {
+static const char * const dev_types[] = {
[DEV_UNKNOWN] = "Unknown",
[DEV_X1] = "x1",
[DEV_X2] = "x2",
[DEV_X64] = "x64"
};
-static const char *edac_caps[] = {
+static const char * const edac_caps[] = {
[EDAC_UNKNOWN] = "Unknown",
[EDAC_NONE] = "None",
[EDAC_RESERVED] = "Reserved",
};
/* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 5);
/* Total possible dynamic ce_count attribute file table */
config EXTCON_GPIO
tristate "GPIO extcon support"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here to enable GPIO based extcon support. Note that GPIO
extcon supports single state per extcon instance.
struct bus_reset_event *e;
e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (e == NULL) {
- fw_notice(client->device->card, "out of memory when allocating event\n");
+ if (e == NULL)
return;
- }
fill_bus_reset_event(&e->reset, client);
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (r == NULL || e == NULL) {
- fw_notice(card, "out of memory when allocating event\n");
+ if (r == NULL || e == NULL)
goto failed;
- }
+
r->card = card;
r->request = request;
r->data = payload;
struct iso_interrupt_event *e;
e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
- if (e == NULL) {
- fw_notice(context->card, "out of memory when allocating event\n");
+ if (e == NULL)
return;
- }
+
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
e->interrupt.closure = client->iso_closure;
e->interrupt.cycle = cycle;
struct iso_interrupt_mc_event *e;
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (e == NULL) {
- fw_notice(context->card, "out of memory when allocating event\n");
+ if (e == NULL)
return;
- }
+
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
e->interrupt.closure = client->iso_closure;
e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
int ret;
if ((request->channels == 0 && request->bandwidth == 0) ||
- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
- request->bandwidth < 0)
+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
return -EINVAL;
r = kmalloc(sizeof(*r), GFP_KERNEL);
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
- if (e == NULL) {
- fw_notice(card, "out of memory when allocating event\n");
+ if (e == NULL)
break;
- }
+
e->phy_packet.closure = client->phy_receiver_closure;
e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
e->phy_packet.rcode = RCODE_COMPLETE;
* match the drivers id_tables against it.
*/
unit = kzalloc(sizeof(*unit), GFP_KERNEL);
- if (unit == NULL) {
- fw_err(device->card, "out of memory for unit\n");
+ if (unit == NULL)
continue;
- }
unit->directory = ci.p + value - 1;
unit->device.bus = &fw_bus_type;
}
new = kmalloc(sizeof(*new), GFP_ATOMIC);
- if (!new) {
- dev_err(&pd->skb->dev->dev, "out of memory\n");
+ if (!new)
return NULL;
- }
new->offset = offset;
new->len = len;
fail_w_new:
kfree(new);
fail:
- dev_err(&net->dev, "out of memory\n");
-
return NULL;
}
skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
if (unlikely(!skb)) {
- dev_err(&net->dev, "out of memory\n");
net->stats.rx_dropped++;
return -ENOMEM;
#include "core.h"
#include "ohci.h"
+#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
+#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
+#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
+
#define DESCRIPTOR_OUTPUT_MORE 0
#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
#define DESCRIPTOR_INPUT_MORE (2 << 12)
#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
#define DESCRIPTOR_WAIT (3 << 0)
+#define DESCRIPTOR_CMD (0xf << 12)
+
struct descriptor {
__le16 req_count;
__le16 control;
struct descriptor *last;
/*
- * The last descriptor in the DMA program. It contains the branch
+ * The last descriptor block in the DMA program. It contains the branch
* address that must be updated upon appending a new descriptor.
*/
struct descriptor *prev;
+ int prev_z;
descriptor_callback_t callback;
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
+#define PCI_DEVICE_ID_VIA_VT630X 0x3044
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
+#define PCI_REV_ID_VIA_VT6306 0x46
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_NO_1394A 8
#define QUIRK_NO_MSI 16
#define QUIRK_TI_SLLZ059 32
+#define QUIRK_IR_WAKE 64
+#define QUIRK_PHY_LCTRL_TIMEOUT 128
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
- QUIRK_NO_MSI},
+ QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_PHY_LCTRL_TIMEOUT},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_RESET_PACKET},
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
+ QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
+
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
+ ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
+ ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
!(evt & OHCI1394_busReset))
return;
- dev_notice(ohci->card.device,
- "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
- dev_notice(ohci->card.device,
- "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
+ ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
+ self_id_count, generation, ohci->node_id);
for (s = ohci->self_id_buffer; self_id_count--; ++s)
if ((*s & 1 << 23) == 0)
- dev_notice(ohci->card.device,
- "selfID 0: %08x, phy %d [%c%c%c] "
- "%s gc=%d %s %s%s%s\n",
+ ohci_notice(ohci,
+ "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
else
- dev_notice(ohci->card.device,
+ ohci_notice(ohci,
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
evt = 0x1f;
if (evt == OHCI1394_evt_bus_reset) {
- dev_notice(ohci->card.device,
- "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
+ ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
+ dir, (header[2] >> 16) & 0xff);
return;
}
switch (tcode) {
case 0xa:
- dev_notice(ohci->card.device,
- "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
+ ohci_notice(ohci, "A%c %s, %s\n",
+ dir, evts[evt], tcodes[tcode]);
break;
case 0xe:
- dev_notice(ohci->card.device,
- "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
+ ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
+ dir, evts[evt], header[1], header[2]);
break;
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
- dev_notice(ohci->card.device,
- "A%c spd %x tl %02x, "
- "%04x -> %04x, %s, "
- "%s, %04x%08x%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], header[1] & 0xffff, header[2], specific);
+ ohci_notice(ohci,
+ "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
+ dir, speed, header[0] >> 10 & 0x3f,
+ header[1] >> 16, header[0] >> 16, evts[evt],
+ tcodes[tcode], header[1] & 0xffff, header[2], specific);
break;
default:
- dev_notice(ohci->card.device,
- "A%c spd %x tl %02x, "
- "%04x -> %04x, %s, "
- "%s%s\n",
- dir, speed, header[0] >> 10 & 0x3f,
- header[1] >> 16, header[0] >> 16, evts[evt],
- tcodes[tcode], specific);
+ ohci_notice(ohci,
+ "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
+ dir, speed, header[0] >> 10 & 0x3f,
+ header[1] >> 16, header[0] >> 16, evts[evt],
+ tcodes[tcode], specific);
}
}
if (i >= 3)
msleep(1);
}
- dev_err(ohci->card.device, "failed to read phy reg\n");
+ ohci_err(ohci, "failed to read phy reg %d\n", addr);
+ dump_stack();
return -EBUSY;
}
if (i >= 3)
msleep(1);
}
- dev_err(ohci->card.device, "failed to write phy reg\n");
+ ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
+ dump_stack();
return -EBUSY;
}
reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
flush_writes(ohci);
- dev_err(ohci->card.device, "AR error: %s; DMA stopped\n",
- error_msg);
+ ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
}
/* FIXME: restart? */
}
ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
ctx->last = ctx->buffer_tail->buffer;
ctx->prev = ctx->buffer_tail->buffer;
+ ctx->prev_z = 1;
return 0;
}
{
dma_addr_t d_bus;
struct descriptor_buffer *desc = ctx->buffer_tail;
+ struct descriptor *d_branch;
d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
desc->used += (z + extra) * sizeof(*d);
wmb(); /* finish init of new descriptors before branch_address update */
- ctx->prev->branch_address = cpu_to_le32(d_bus | z);
- ctx->prev = find_branch_descriptor(d, z);
+
+ d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
+ d_branch->branch_address = cpu_to_le32(d_bus | z);
+
+ /*
+ * VT6306 incorrectly checks only the single descriptor at the
+ * CommandPtr when the wake bit is written, so if it's a
+ * multi-descriptor block starting with an INPUT_MORE, put a copy of
+ * the branch address in the first descriptor.
+ *
+ * This is not done for transmit contexts since it is unclear how it
+ * interacts with skip addresses.
+ */
+ if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
+ d_branch != ctx->prev &&
+ (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
+ cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
+ ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+ }
+
+ ctx->prev = d;
+ ctx->prev_z = z;
}
static void context_stop(struct context *ctx)
if (i)
udelay(10);
}
- dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg);
+ ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
goto out;
}
- dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n");
+ ohci_err(ohci, "swap not done (CSR lock timeout)\n");
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
ctl = reg_read(ohci, CONTROL_SET(regs));
if (ctl & CONTEXT_DEAD)
- dev_err(ohci->card.device,
- "DMA context %s has stopped, error code: %s\n",
+ ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
name, evts[ctl & 0x1f]);
}
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
- dev_notice(ohci->card.device,
- "node ID not valid, new bus reset in progress\n");
+ ohci_notice(ohci,
+ "node ID not valid, new bus reset in progress\n");
return -EBUSY;
}
self_id |= ((reg & 0x3f) << 24); /* phy ID */
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
- dev_notice(ohci->card.device,
- "node ID not valid, new bus reset in progress\n");
+ ohci_notice(ohci,
+ "node ID not valid, new bus reset in progress\n");
return;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
- dev_notice(ohci->card.device, "malconfigured bus\n");
+ ohci_notice(ohci, "malconfigured bus\n");
return;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
- dev_notice(ohci->card.device, "inconsistent self IDs\n");
+ ohci_notice(ohci, "self ID receive error\n");
return;
}
/*
self_id_count = (reg >> 3) & 0xff;
if (self_id_count > 252) {
- dev_notice(ohci->card.device, "inconsistent self IDs\n");
+ ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
return;
}
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
- if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+ u32 id = cond_le32_to_cpu(ohci->self_id_cpu[i]);
+ u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]);
+
+ if (id != ~id2) {
/*
* If the invalid data looks like a cycle start packet,
* it's likely to be the result of the cycle master
* so far are valid and should be processed so that the
* bus manager can then correct the gap count.
*/
- if (cond_le32_to_cpu(ohci->self_id_cpu[i])
- == 0xffff008f) {
- dev_notice(ohci->card.device,
- "ignoring spurious self IDs\n");
+ if (id == 0xffff008f) {
+ ohci_notice(ohci, "ignoring spurious self IDs\n");
self_id_count = j;
break;
- } else {
- dev_notice(ohci->card.device,
- "inconsistent self IDs\n");
- return;
}
+
+ ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
+ j, self_id_count, id, id2);
+ return;
}
- ohci->self_id_buffer[j] =
- cond_le32_to_cpu(ohci->self_id_cpu[i]);
+ ohci->self_id_buffer[j] = id;
}
if (ohci->quirks & QUIRK_TI_SLLZ059) {
self_id_count = find_and_insert_self_id(ohci, self_id_count);
if (self_id_count < 0) {
- dev_notice(ohci->card.device,
- "could not construct local self ID\n");
+ ohci_notice(ohci,
+ "could not construct local self ID\n");
return;
}
}
if (self_id_count == 0) {
- dev_notice(ohci->card.device, "inconsistent self IDs\n");
+ ohci_notice(ohci, "no self IDs\n");
return;
}
rmb();
new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
if (new_generation != generation) {
- dev_notice(ohci->card.device,
- "new bus reset, discarding self ids\n");
+ ohci_notice(ohci, "new bus reset, discarding self ids\n");
return;
}
}
if (unlikely(event & OHCI1394_regAccessFail))
- dev_err(ohci->card.device, "register access failure\n");
+ ohci_err(ohci, "register access failure\n");
if (unlikely(event & OHCI1394_postedWriteErr)) {
reg_read(ohci, OHCI1394_PostedWriteAddressHi);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
if (printk_ratelimit())
- dev_err(ohci->card.device, "PCI posted write error\n");
+ ohci_err(ohci, "PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
if (printk_ratelimit())
- dev_notice(ohci->card.device,
- "isochronous cycle too long\n");
+ ohci_notice(ohci, "isochronous cycle too long\n");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
}
* them at least two cycles later. (FIXME?)
*/
if (printk_ratelimit())
- dev_notice(ohci->card.device,
- "isochronous cycle inconsistent\n");
+ ohci_notice(ohci, "isochronous cycle inconsistent\n");
}
if (unlikely(event & OHCI1394_unrecoverableError))
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct pci_dev *dev = to_pci_dev(card->device);
u32 lps, version, irqs;
int i, ret;
if (software_reset(ohci)) {
- dev_err(card->device, "failed to reset ohci card\n");
+ ohci_err(ohci, "failed to reset ohci card\n");
return -EBUSY;
}
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. However, with some cards (well, at least
* a JMicron PCIe card), we have to try again sometimes.
+ *
+ * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
+ * cannot actually use the phy at that time. These need tens of
+ * milliseconds pause between LPS write and first phy access too.
+ *
+ * But do not wait for 50msec on Agere/LSI cards. Their phy
+ * arbitration state machine may time out during such a long wait.
*/
+
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_LPS |
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- for (lps = 0, i = 0; !lps && i < 3; i++) {
+ if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
msleep(50);
+
+ for (lps = 0, i = 0; !lps && i < 150; i++) {
+ msleep(1);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
if (!lps) {
- dev_err(card->device, "failed to set Link Power Status\n");
+ ohci_err(ohci, "failed to set Link Power Status\n");
return -EIO;
}
if (ret < 0)
return ret;
if (ret)
- dev_notice(card->device, "local TSB41BA3D phy\n");
+ ohci_notice(ohci, "local TSB41BA3D phy\n");
else
ohci->quirks &= ~QUIRK_TI_SLLZ059;
}
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
- if (!(ohci->quirks & QUIRK_NO_MSI))
- pci_enable_msi(dev);
- if (request_irq(dev->irq, irq_handler,
- pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
- ohci_driver_name, ohci)) {
- dev_err(card->device, "failed to allocate interrupt %d\n",
- dev->irq);
- pci_disable_msi(dev);
-
- if (config_rom) {
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->next_config_rom,
- ohci->next_config_rom_bus);
- ohci->next_config_rom = NULL;
- }
- return -EIO;
- }
-
irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_RQPkt | OHCI1394_RSPkt |
OHCI1394_isochTx | OHCI1394_isochRx |
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
- dev_err(&dev->dev, "invalid MMIO resource\n");
+ ohci_err(ohci, "invalid MMIO resource\n");
err = -ENXIO;
goto fail_disable;
}
err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
- dev_err(&dev->dev, "MMIO resource unavailable\n");
+ ohci_err(ohci, "MMIO resource unavailable\n");
goto fail_disable;
}
ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
if (ohci->registers == NULL) {
- dev_err(&dev->dev, "failed to remap registers\n");
+ ohci_err(ohci, "failed to remap registers\n");
err = -ENXIO;
goto fail_iomem;
}
guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
reg_read(ohci, OHCI1394_GUIDLo);
+ if (!(ohci->quirks & QUIRK_NO_MSI))
+ pci_enable_msi(dev);
+ if (request_irq(dev->irq, irq_handler,
+ pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
+ ohci_driver_name, ohci)) {
+ ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
+ err = -EIO;
+ goto fail_msi;
+ }
+
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
- goto fail_contexts;
+ goto fail_irq;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
- dev_notice(&dev->dev,
- "added OHCI v%x.%x device as card %d, "
- "%d IR + %d IT contexts, quirks 0x%x\n",
- version >> 16, version & 0xff, ohci->card.index,
- ohci->n_ir, ohci->n_it, ohci->quirks);
+ ohci_notice(ohci,
+ "added OHCI v%x.%x device as card %d, "
+ "%d IR + %d IT contexts, quirks 0x%x\n",
+ version >> 16, version & 0xff, ohci->card.index,
+ ohci->n_ir, ohci->n_it, ohci->quirks);
return 0;
+ fail_irq:
+ free_irq(dev->irq, ohci);
+ fail_msi:
+ pci_disable_msi(dev);
fail_contexts:
kfree(ohci->ir_context_list);
kfree(ohci->it_context_list);
kfree(ohci);
pmac_ohci_off(dev);
fail:
- if (err == -ENOMEM)
- dev_err(&dev->dev, "out of memory\n");
-
return err;
}
static void pci_remove(struct pci_dev *dev)
{
- struct fw_ohci *ohci;
+ struct fw_ohci *ohci = pci_get_drvdata(dev);
- ohci = pci_get_drvdata(dev);
- reg_write(ohci, OHCI1394_IntMaskClear, ~0);
- flush_writes(ohci);
+ /*
+ * If the removal is happening from the suspend state, LPS won't be
+ * enabled and host registers (eg., IntMaskClear) won't be accessible.
+ */
+ if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
+ reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+ flush_writes(ohci);
+ }
cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
int err;
software_reset(ohci);
- free_irq(dev->irq, ohci);
- pci_disable_msi(dev);
err = pci_save_state(dev);
if (err) {
- dev_err(&dev->dev, "pci_save_state failed\n");
+ ohci_err(ohci, "pci_save_state failed\n");
return err;
}
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
- dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err);
+ ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
pmac_ohci_off(dev);
return 0;
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
- dev_err(&dev->dev, "pci_enable_device failed\n");
+ ohci_err(ohci, "pci_enable_device failed\n");
return err;
}
MODULE_LICENSE("GPL");
/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
-#endif
return -ENODEV;
if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
- BUG_ON(dma_set_max_seg_size(device->card->device,
- SBP2_MAX_SEG_SIZE));
+ WARN_ON(dma_set_max_seg_size(device->card->device,
+ SBP2_MAX_SEG_SIZE));
shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
if (shost == NULL)
}
orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
- if (orb == NULL) {
- dev_notice(lu_dev(lu), "failed to alloc ORB\n");
+ if (orb == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
- }
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
-#endif
static int __init sbp2_init(void)
{
menuconfig GPIOLIB
bool "GPIO Support"
depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
}
/*
- * GENERIC_GPIO primitives.
+ * GPIO primitives.
*/
static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip,
unsigned pin)
config I2C_CBUS_GPIO
tristate "CBUS I2C driver"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Support for CBUS access using I2C API. Mostly relevant for Nokia
Internet Tablets (770, N800 and N810).
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select I2C_ALGOBIT
help
This is a very simple bitbanging I2C driver utilizing the
config I2C_ARB_GPIO_CHALLENGE
tristate "GPIO-based I2C arbitration"
- depends on GENERIC_GPIO && OF
+ depends on GPIOLIB && OF
help
If you say yes to this option, support will be included for an
I2C multimaster arbitration scheme using GPIOs and a challenge &
config I2C_MUX_GPIO
tristate "GPIO-based I2C multiplexer"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
If you say yes to this option, support will be included for a
GPIO based I2C multiplexer. This driver provides access to
config KEYBOARD_GPIO
tristate "GPIO Buttons"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This driver implements support for buttons connected
to GPIO pins of various CPUs (and some other chips).
config KEYBOARD_GPIO_POLLED
tristate "Polled GPIO buttons"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
This driver implements support for buttons connected
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_MATRIXKMAP
help
Enable support for GPIO driven matrix keypad.
config INPUT_GP2A
tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
depends on I2C
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
hooked to an I2C bus.
config INPUT_GPIO_TILT_POLLED
tristate "Polled GPIO tilt switch"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
This driver implements support for tilt switches connected
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
- depends on GPIOLIB && GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here to add support for rotary encoders connected to GPIO lines.
Check file:Documentation/input/rotary-encoder.txt for more
config INPUT_RB532_BUTTON
tristate "Mikrotik Routerboard 532 button interface"
depends on MIKROTIK_RB532
- depends on GPIOLIB && GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
Say Y here if you want support for the S1 button built into
config MOUSE_GPIO
tristate "GPIO mouse"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select INPUT_POLLDEV
help
This driver simulates a mouse on GPIO lines of various CPUs (and some
config LEDS_GPIO
tristate "LED Support for GPIO connected LEDs"
depends on LEDS_CLASS
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This option enables support for the LEDs connected to GPIO
outputs. To be useful the particular board must have LEDs
config LEDS_LT3593
tristate "LED driver for LT3593 controllers"
depends on LEDS_CLASS
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This option enables support for LEDs driven by a Linear Technology
LT3593 controller. This controller uses a special one-wire pulse
config LEDS_RENESAS_TPU
bool "LED support for Renesas TPU"
- depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
+ depends on LEDS_CLASS=y && HAVE_CLK && GPIOLIB
help
This option enables build of the LED TPU platform driver,
suitable to drive any TPU channel on newer Renesas SoCs.
comment "User Modules And Translation Layers"
-config MTD_CHAR
- tristate "Direct char device access to MTD devices"
- help
- This provides a character device for each MTD device present in
- the system, allowing the user to read and write directly to the
- memory chips, and also use ioctl() to obtain information about
- the device, or to erase parts of it.
-
-config HAVE_MTD_OTP
- bool
- help
- Enable access to OTP regions using MTD_CHAR.
-
config MTD_BLKDEVS
tristate "Common interface to block layer for MTD 'translation layers'"
depends on BLOCK
# Core functionality.
obj-$(CONFIG_MTD) += mtd.o
-mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
+mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
# 'Users' - code which presents functionality to userspace.
-obj-$(CONFIG_MTD_CHAR) += mtdchar.o
obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o
obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
config MTD_OTP
bool "Protection Registers aka one-time programmable (OTP) bits"
depends on MTD_CFI_ADV_OPTIONS
- select HAVE_MTD_OTP
default n
help
This enables support for reading, writing and locking so called
config MTD_DATAFLASH_OTP
bool "DataFlash OTP support (Security Register)"
depends on MTD_DATAFLASH
- select HAVE_MTD_OTP
help
Newer DataFlash chips (revisions C and D) support 128 bytes of
one-time-programmable (OTP) data. The first half may be written
comment "Disk-On-Chip Device Drivers"
-config MTD_DOC2000
- tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
- depends on MTD_NAND
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an MTD device driver for the M-Systems DiskOnChip
- 2000 and Millennium devices. Originally designed for the DiskOnChip
- 2000, it also now includes support for the DiskOnChip Millennium.
- If you have problems with this driver and the DiskOnChip Millennium,
- you may wish to try the alternative Millennium driver below. To use
- the alternative driver, you will need to undefine DOC_SINGLE_DRIVER
- in the <file:drivers/mtd/devices/docprobe.c> source code.
-
- If you use this device, you probably also want to enable the NFTL
- 'NAND Flash Translation Layer' option below, which is used to
- emulate a block device by using a kind of file system on the flash
- chips.
-
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
-config MTD_DOC2001
- tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
- depends on MTD_NAND
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an alternative MTD device driver for the M-Systems
- DiskOnChip Millennium devices. Use this if you have problems with
- the combined DiskOnChip 2000 and Millennium driver above. To get
- the DiskOnChip probe code to load and use this driver instead of
- the other one, you will need to undefine DOC_SINGLE_DRIVER near
- the beginning of <file:drivers/mtd/devices/docprobe.c>.
-
- If you use this device, you probably also want to enable the NFTL
- 'NAND Flash Translation Layer' option below, which is used to
- emulate a block device by using a kind of file system on the flash
- chips.
-
- NOTE: This driver is deprecated and will probably be removed soon.
- Please try the new DiskOnChip driver under "NAND Flash Device
- Drivers".
-
-config MTD_DOC2001PLUS
- tristate "M-Systems Disk-On-Chip Millennium Plus"
- depends on MTD_NAND
- select MTD_DOCPROBE
- select MTD_NAND_IDS
- ---help---
- This provides an MTD device driver for the M-Systems DiskOnChip
- Millennium Plus devices.
-
- If you use this device, you probably also want to enable the INFTL
- 'Inverse NAND Flash Translation Layer' option below, which is used
- to emulate a block device by using a kind of file system on the
- flash chips.
-
- NOTE: This driver will soon be replaced by the new DiskOnChip driver
- under "NAND Flash Device Drivers" (currently that driver does not
- support all Millennium Plus devices).
-
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
# linux/drivers/mtd/devices/Makefile
#
-obj-$(CONFIG_MTD_DOC2000) += doc2000.o
-obj-$(CONFIG_MTD_DOC2001) += doc2001.o
-obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
obj-$(CONFIG_MTD_DOCG3) += docg3.o
-obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
-obj-$(CONFIG_MTD_DOCECC) += docecc.o
obj-$(CONFIG_MTD_SLRAM) += slram.o
obj-$(CONFIG_MTD_PHRAM) += phram.o
obj-$(CONFIG_MTD_PMC551) += pmc551.o
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
-static const char *probes[] = { "bcm47xxpart", NULL };
+static const char * const probes[] = { "bcm47xxpart", NULL };
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
}
sflash->priv = b47s;
+ b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+
+ switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ST;
+ break;
+ case BCMA_CC_FLASHT_ATSER:
+ b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
+ break;
+ }
+
b47s->window = sflash->window;
b47s->blocksize = sflash->blocksize;
b47s->numblocks = sflash->numblocks;
#include <linux/mtd/mtd.h>
+/* Used for ST flashes only. */
+#define OPCODE_ST_WREN 0x0006 /* Write Enable */
+#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */
+#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */
+#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */
+#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */
+#define OPCODE_ST_PP 0x0302 /* Page Program */
+#define OPCODE_ST_SE 0x02d8 /* Sector Erase */
+#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */
+#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */
+#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */
+#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */
+#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */
+
+/* Used for Atmel flashes only. */
+#define OPCODE_AT_READ 0x07e8
+#define OPCODE_AT_PAGE_READ 0x07d2
+#define OPCODE_AT_STATUS 0x01d7
+#define OPCODE_AT_BUF1_WRITE 0x0384
+#define OPCODE_AT_BUF2_WRITE 0x0387
+#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283
+#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286
+#define OPCODE_AT_BUF1_PROGRAM 0x0288
+#define OPCODE_AT_BUF2_PROGRAM 0x0289
+#define OPCODE_AT_PAGE_ERASE 0x0281
+#define OPCODE_AT_BLOCK_ERASE 0x0250
+#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define OPCODE_AT_BUF1_LOAD 0x0253
+#define OPCODE_AT_BUF2_LOAD 0x0255
+#define OPCODE_AT_BUF1_COMPARE 0x0260
+#define OPCODE_AT_BUF2_COMPARE 0x0261
+#define OPCODE_AT_BUF1_REPROGRAM 0x0258
+#define OPCODE_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for ST flashes */
+#define SR_ST_WIP 0x01 /* Write In Progress */
+#define SR_ST_WEL 0x02 /* Write Enable Latch */
+#define SR_ST_BP_MASK 0x1c /* Block Protect */
+#define SR_ST_BP_SHIFT 2
+#define SR_ST_SRWD 0x80 /* Status Register Write Disable */
+
+/* Status register bits for Atmel flashes */
+#define SR_AT_READY 0x80
+#define SR_AT_MISMATCH 0x40
+#define SR_AT_ID_MASK 0x38
+#define SR_AT_ID_SHIFT 3
+
+struct bcma_drv_cc;
+
+enum bcm47xxsflash_type {
+ BCM47XXSFLASH_TYPE_ATMEL,
+ BCM47XXSFLASH_TYPE_ST,
+};
+
struct bcm47xxsflash {
+ struct bcma_drv_cc *bcma_cc;
+
+ enum bcm47xxsflash_type type;
+
u32 window;
u32 blocksize;
u16 numblocks;
+++ /dev/null
-
-/*
- * Linux driver for Disk-On-Chip 2000 and Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-#define DOC_SUPPORT_2000
-#define DOC_SUPPORT_2000TSOP
-#define DOC_SUPPORT_MILLENNIUM
-
-#ifdef DOC_SUPPORT_2000
-#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k)
-#else
-#define DoC_is_2000(doc) (0)
-#endif
-
-#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
-#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
-#else
-#define DoC_is_Millennium(doc) (0)
-#endif
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:
- #undef USE_MEMCPY
-*/
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *doc2klist = NULL;
-
-/* Perform the required delay cycles by reading from the appropriate register */
-static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++) {
- if (DoC_is_Millennium(doc))
- dummy = ReadDOC(doc->virtadr, NOP);
- else
- dummy = ReadDOC(doc->virtadr, DOCStatus);
- }
-
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(struct DiskOnChip *doc)
-{
- void __iomem *docptr = doc->virtadr;
- unsigned long timeo = jiffies + (HZ * 10);
-
- pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
- if (time_after(jiffies, timeo)) {
- pr_debug("_DoC_WaitReady timed out.\n");
- return -EIO;
- }
- udelay(1);
- cond_resched();
- }
-
- return 0;
-}
-
-static inline int DoC_WaitReady(struct DiskOnChip *doc)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
- /* 4 read form NOP register should be issued in prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(doc);
-
- /* issue 2 read from NOP register after reading from CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(doc, 2);
-
- return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to
- bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Command(struct DiskOnChip *doc, unsigned char command,
- unsigned char xtraflags)
-{
- void __iomem *docptr = doc->virtadr;
-
- if (DoC_is_2000(doc))
- xtraflags |= CDSN_CTRL_FLASH_IO;
-
- /* Assert the CLE (Command Latch Enable) line to the flash chip */
- WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, CDSNSlowIO);
-
- /* Send the command */
- WriteDOC_(command, docptr, doc->ioreg);
- if (DoC_is_Millennium(doc))
- WriteDOC(command, docptr, WritePipeTerm);
-
- /* Lower the CLE line */
- WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to
- bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
- unsigned char xtraflags1, unsigned char xtraflags2)
-{
- int i;
- void __iomem *docptr = doc->virtadr;
-
- if (DoC_is_2000(doc))
- xtraflags1 |= CDSN_CTRL_FLASH_IO;
-
- /* Assert the ALE (Address Latch Enable) line to the flash chip */
- WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
-
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Send the address */
- /* Devices with 256-byte page are addressed as:
- Column (bits 0-7), Page (bits 8-15, 16-23, 24-31)
- * there is no device on the market with page256
- and more than 24 bits.
- Devices with 512-byte page are addressed as:
- Column (bits 0-7), Page (bits 9-16, 17-24, 25-31)
- * 25-31 is sent only if the chip support it.
- * bit 8 changes the read command to be sent
- (NAND_CMD_READ0 or NAND_CMD_READ1).
- */
-
- if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) {
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
- WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
- }
-
- if (doc->page256) {
- ofs = ofs >> 8;
- } else {
- ofs = ofs >> 9;
- }
-
- if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) {
- for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) {
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
- WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
- }
- }
-
- if (DoC_is_Millennium(doc))
- WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
-
- DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */
-
- /* FIXME: The SlowIO's for millennium could be replaced by
- a single WritePipeTerm here. mf. */
-
- /* Lower the ALE line */
- WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr,
- CDSNControl);
-
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for the chip to respond - Software requirement 11.4.1 */
- return DoC_WaitReady(doc);
-}
-
-/* Read a buffer from DoC, taking care of Millennium odditys */
-static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
-{
- volatile int dummy;
- int modulus = 0xffff;
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (len <= 0)
- return;
-
- if (DoC_is_Millennium(doc)) {
- /* Read the data via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-
- /* Millennium should use the LastDataRead register - Pipeline Reads */
- len--;
-
- /* This is needed for correctly ECC calculation */
- modulus = 0xff;
- }
-
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus));
-
- if (DoC_is_Millennium(doc)) {
- buf[i] = ReadDOC(docptr, LastDataRead);
- }
-}
-
-/* Write a buffer to DoC, taking care of Millennium odditys */
-static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
-{
- void __iomem *docptr = doc->virtadr;
- int i;
-
- if (len <= 0)
- return;
-
- for (i = 0; i < len; i++)
- WriteDOC_(buf[i], docptr, doc->ioreg + i);
-
- if (DoC_is_Millennium(doc)) {
- WriteDOC(0x00, docptr, WritePipeTerm);
- }
-}
-
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-
-static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* Software requirement 11.4.4 before writing DeviceSelect */
- /* Deassert the CE line to eliminate glitches on the FCE# outputs */
- WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Select the individual flash chip requested */
- WriteDOC(chip, docptr, CDSNDeviceSelect);
- DoC_Delay(doc, 4);
-
- /* Reassert the CE line */
- WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr,
- CDSNControl);
- DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */
-
- /* Wait for it to be ready */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-
-static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
-{
- void __iomem *docptr = doc->virtadr;
-
- /* Select the floor (bank) of chips required */
- WriteDOC(floor, docptr, FloorSelect);
-
- /* Wait for the chip to be ready */
- return DoC_WaitReady(doc);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
-
- /* Page in the required floor/chip */
- DoC_SelectFloor(doc, floor);
- DoC_SelectChip(doc, chip);
-
- /* Reset the chip */
- if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
- pr_debug("DoC_Command (reset) for %d,%d returned true\n",
- floor, chip);
- return 0;
- }
-
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
- pr_debug("DoC_Command (ReadID) for %d,%d returned true\n",
- floor, chip);
- return 0;
- }
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0);
-
- /* Read the manufacturer and device id codes from the device */
-
- if (DoC_is_Millennium(doc)) {
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- mfr = ReadDOC(doc->virtadr, LastDataRead);
-
- DoC_Delay(doc, 2);
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- id = ReadDOC(doc->virtadr, LastDataRead);
- } else {
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- mfr = ReadDOC_(doc->virtadr, doc->ioreg);
-
- /* CDSN Slow IO register see Software Req 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- id = ReadDOC_(doc->virtadr, doc->ioreg);
- }
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- /* Check it's the same as the first chip we identified.
- * M-Systems say that any given DiskOnChip device should only
- * contain _one_ type of flash part, although that's not a
- * hardware restriction. */
- if (doc->mfr) {
- if (doc->mfr == mfr && doc->id == id)
- return 1; /* This is the same as the first */
- else
- printk(KERN_WARNING
- "Flash chip at floor %d, chip %d is different:\n",
- floor, chip);
- }
-
- /* Print and store the manufacturer and ID codes. */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO
- "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n", mfr, id,
- nand_manuf_ids[j].name, nand_flash_ids[i].name);
- if (!doc->mfr) {
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift =
- ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
- doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
- doc->erasesize =
- nand_flash_ids[i].erasesize;
- return 1;
- }
- return 0;
- }
- }
-
-
- /* We haven't fully identified the chip. Print as much as we know. */
- printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n",
- id, mfr);
-
- printk(KERN_WARNING "Please report to dwmw2@infradead.org\n");
- return 0;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-
-static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
-{
- int floor, chip;
- int numchips[MAX_FLOORS];
- int ret = 1;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0; floor < MAX_FLOORS; floor++) {
- ret = 1;
- numchips[floor] = 0;
- for (chip = 0; chip < maxchips && ret != 0; chip++) {
-
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
-
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk(KERN_NOTICE "No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips) {
- printk(KERN_NOTICE "No memory for allocating chip info structures\n");
- return;
- }
-
- ret = 0;
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0; floor < MAX_FLOORS; floor++) {
- for (chip = 0; chip < numchips[floor]; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
-
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips, this->totlen >> 20);
-}
-
-static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
- * purpose. If it's value is the same on both chips, they might
- * be the same chip, and we write to one and check for a change in
- * the other. It's unclear if this register is usuable in the
- * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp2 == (tmp1 + 1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoC2k_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
- int maxchips;
-
- /* We must avoid being called twice for the same device. */
-
- if (doc2klist)
- old = doc2klist->priv;
-
- while (old) {
- if (DoC2k_is_alias(old, this)) {
- printk(KERN_NOTICE
- "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
- this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
-
- switch (this->ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- mtd->name = "DiskOnChip 2000 TSOP";
- this->ioreg = DoC_Mil_CDSN_IO;
- /* Pretend it's a Millennium */
- this->ChipID = DOC_ChipID_DocMil;
- maxchips = MAX_CHIPS;
- break;
- case DOC_ChipID_Doc2k:
- mtd->name = "DiskOnChip 2000";
- this->ioreg = DoC_2k_CDSN_IO;
- maxchips = MAX_CHIPS;
- break;
- case DOC_ChipID_DocMil:
- mtd->name = "DiskOnChip Millennium";
- this->ioreg = DoC_Mil_CDSN_IO;
- maxchips = MAX_CHIPS_MIL;
- break;
- default:
- printk("Unknown ChipID 0x%02x\n", this->ChipID);
- kfree(mtd);
- iounmap(this->virtadr);
- return;
- }
-
- printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
- this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->writebufsize = mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->ecc_strength = 2;
- mtd->owner = THIS_MODULE;
- mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
- mtd->_read_oob = doc_read_oob;
- mtd->_write_oob = doc_write_oob;
- this->curfloor = -1;
- this->curchip = -1;
- mutex_init(&this->lock);
-
- /* Ident all the chips present. */
- DoC_ScanChips(this, maxchips);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = doc2klist;
- doc2klist = mtd;
- mtd->size = this->totlen;
- mtd->erasesize = this->erasesize;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoC2k_init);
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip;
- unsigned char syndrome[6], eccbuf[6];
- volatile char dummy;
- int i, len256 = 0, ret=0;
- size_t left = len;
-
- mutex_lock(&this->lock);
- while (left) {
- len = left;
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- /* The ECC will not be calculated correctly if less than 512 is read */
- if (len != 0x200)
- printk(KERN_WARNING
- "ECC needs a full sector read (adr: %lx size %lx)\n",
- (long) from, (long) len);
-
- /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
-
-
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[from >> (this->chipshift)];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- DoC_Command(this,
- (!this->page256
- && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
- CDSN_CTRL_ECC_IO);
-
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, ECCConf);
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && from + len > (from | 0xff) + 1) {
- len256 = (from | 0xff) + 1 - from;
- DoC_ReadBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
- CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
- }
-
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- /* Note: this will work even with 2M x 8bit devices as */
-		/* they have 8 bytes of OOB per 256-byte page. mf. */
- DoC_ReadBuf(this, eccbuf, 6);
-
- /* Flush the pipeline */
- if (DoC_is_Millennium(this)) {
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- i = ReadDOC(docptr, ECCConf);
- } else {
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- i = ReadDOC(docptr, 2k_ECCStatus);
- }
-
- /* Check the ECC Status */
- if (i & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
- /* Read the ECC syndrome through the DiskOnChip ECC
-			   logic. These syndromes will be all ZERO when there
- is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] =
- ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
-
-#ifdef ECC_DEBUG
- printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the
- read. Not that this can be told to
- user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by
- checking *retlen */
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
-#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
- /* according to 11.4.1, we need to wait for the busy line
-		 * to drop if we read to the end of the page. */
- if(0 == ((from + len) & 0x1ff))
- {
- DoC_WaitReady(this);
- }
-
- from += len;
- left -= len;
- buf += len;
- }
-
- mutex_unlock(&this->lock);
-
- return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t * retlen, const u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
- void __iomem *docptr = this->virtadr;
- unsigned char eccbuf[6];
- volatile char dummy;
- int len256 = 0;
- struct Nand *mychip;
- size_t left = len;
- int status;
-
- mutex_lock(&this->lock);
- while (left) {
- len = left;
-
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ((to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
-
- /* The ECC will not be calculated correctly if less than 512 is written */
-/* DBB-
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector write (adr: %lx size %lx)\n",
- (long) to, (long) len);
- -DBB */
-
- /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
-
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[to >> (this->chipshift)];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Set device to main plane of flash */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_Command(this,
- (!this->page256
- && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
-
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && to + len > (to | 0xff) + 1) {
- len256 = (to | 0xff) + 1 - to;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
-
- if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- mutex_unlock(&this->lock);
- return -EIO;
- }
-
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
- CDSN_CTRL_ECC_IO);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
-
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl);
-
- if (DoC_is_Millennium(this)) {
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- } else {
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- }
-
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
- CDSNControl);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (di = 0; di < 6; di++) {
- eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
- }
-
- /* Reset the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-
-#ifdef PSYCHO_DEBUG
- printk
- ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- mutex_unlock(&this->lock);
- return -EIO;
- }
-
- /* Let the caller know we completed it */
- *retlen += len;
-
- {
- unsigned char x[8];
- size_t dummy;
- int ret;
-
- /* Write the ECC data to flash */
- for (di=0; di<6; di++)
- x[di] = eccbuf[di];
-
- x[6]=0x55;
- x[7]=0x55;
-
- ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
- if (ret) {
- mutex_unlock(&this->lock);
- return ret;
- }
- }
-
- to += len;
- left -= len;
- buf += len;
- }
-
- mutex_unlock(&this->lock);
- return 0;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- struct DiskOnChip *this = mtd->priv;
- int len256 = 0, ret;
- struct Nand *mychip;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- mutex_lock(&this->lock);
-
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* update address for 2M x 8bit devices. OOB starts on the second */
- /* page to maintain compatibility with doc_read_ecc. */
- if (this->page256) {
- if (!(ofs & 0x8))
- ofs += 0x100;
- else
- ofs -= 0x8;
- }
-
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0);
-
- /* treat crossing 8-byte OOB data for 2M x 8bit devices */
-	/* Note: datasheet says it should automatically wrap to the */
- /* next OOB block, but it didn't work here. mf. */
- if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
- len256 = (ofs | 0x7) + 1 - ofs;
- DoC_ReadBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff),
- CDSN_CTRL_WP, 0);
- }
-
- DoC_ReadBuf(this, &buf[len256], len - len256);
-
- ops->retlen = len;
-	/* Reading the full OOB data drops us off the end of the page,
-	 * causing the flash device to go into busy mode, so we need
-	 * to wait until ready; see 11.4.1 and the Toshiba TC58256FT docs */
-
- ret = DoC_WaitReady(this);
-
- mutex_unlock(&this->lock);
- return ret;
-
-}
-
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t * retlen, const u_char * buf)
-{
- struct DiskOnChip *this = mtd->priv;
- int len256 = 0;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- volatile int dummy;
- int status;
-
- // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
- // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
-
- /* issue the Read2 command to set the pointer to the Spare Data Area. */
- DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
- /* update address for 2M x 8bit devices. OOB starts on the second */
- /* page to maintain compatibility with doc_read_ecc. */
- if (this->page256) {
- if (!(ofs & 0x8))
- ofs += 0x100;
- else
- ofs -= 0x8;
- }
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0);
-
- /* treat crossing 8-byte OOB data for 2M x 8bit devices */
-	/* Note: datasheet says it should automatically wrap to the */
- /* next OOB block, but it didn't work here. mf. */
- if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
- len256 = (ofs | 0x7) + 1 - ofs;
- DoC_WriteBuf(this, buf, len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- DoC_Command(this, NAND_CMD_STATUS, 0);
- /* DoC_WaitReady() is implicit in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming oob data\n");
- /* There was an error */
- *retlen = 0;
- return -EIO;
- }
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0);
- }
-
- DoC_WriteBuf(this, &buf[len256], len - len256);
-
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
- DoC_Command(this, NAND_CMD_STATUS, 0);
- /* DoC_WaitReady() is implicit in DoC_Command */
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error programming oob data\n");
- /* There was an error */
- *retlen = 0;
- return -EIO;
- }
-
- *retlen = len;
- return 0;
-
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- struct DiskOnChip *this = mtd->priv;
- int ret;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- mutex_lock(&this->lock);
- ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
- &ops->retlen, ops->oobbuf);
-
- mutex_unlock(&this->lock);
- return ret;
-}
-
-static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- volatile int dummy;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip;
- int status;
-
- mutex_lock(&this->lock);
-
- if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
- mutex_unlock(&this->lock);
- return -EINVAL;
- }
-
- instr->state = MTD_ERASING;
-
- /* FIXME: Do this in the background. Use timers or schedule_task() */
- while(len) {
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- DoC_Command(this, NAND_CMD_ERASE1, 0);
- DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
- DoC_Command(this, NAND_CMD_ERASE2, 0);
-
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
-
- if (DoC_is_Millennium(this)) {
- ReadDOC(docptr, ReadPipeInit);
- status = ReadDOC(docptr, LastDataRead);
- } else {
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
- status = ReadDOC_(docptr, this->ioreg);
- }
-
- if (status & 1) {
- printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
- /* There was an error */
- instr->state = MTD_ERASE_FAILED;
- goto callback;
- }
- ofs += mtd->erasesize;
- len -= mtd->erasesize;
- }
- instr->state = MTD_ERASE_DONE;
-
- callback:
- mtd_erase_callback(instr);
-
- mutex_unlock(&this->lock);
- return 0;
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2000(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd = doc2klist)) {
- this = mtd->priv;
- doc2klist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
-
+++ /dev/null
-
-/*
- * Linux driver for Disk-On-Chip Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simply a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:*/
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmillist = NULL;
-
-/* Perform the required delay cycles by reading from the NOP register */
-static void DoC_Delay(void __iomem * docptr, unsigned short cycles)
-{
- volatile char dummy;
- int i;
-
- for (i = 0; i < cycles; i++)
- dummy = ReadDOC(docptr, NOP);
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
- unsigned short c = 0xffff;
-
- pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
- ;
-
- if (c == 0)
- pr_debug("_DoC_WaitReady timed out.\n");
-
- return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
-	/* 4 reads from the NOP register should be issued prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 4);
-
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(docptr);
-
-	/* issue 2 reads from the NOP register after reading from the CDSNControl register
- see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 2);
-
- return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register
- with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static void DoC_Command(void __iomem * docptr, unsigned char command,
- unsigned char xtraflags)
-{
- /* Assert the CLE (Command Latch Enable) line to the flash chip */
- WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-
- /* Send the command */
- WriteDOC(command, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Lower the CLE line */
- WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN IO register
- with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
- required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs,
- unsigned char xtraflags1, unsigned char xtraflags2)
-{
- /* Assert the ALE (Address Latch Enable) line to the flash chip */
- WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-
- /* Send the address */
- switch (numbytes)
- {
- case 1:
- /* Send single byte, bits 0-7. */
- WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- case 2:
- /* Send bits 9-16 followed by 17-23 */
- WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- case 3:
- /* Send 0-7, 9-16, then 17-23 */
- WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
- WriteDOC(0x00, docptr, WritePipeTerm);
- break;
- default:
- return;
- }
-
- /* Lower the ALE line */
- WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl);
- DoC_Delay(docptr, 4);
-}
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
- /* Select the individual flash chip requested */
- WriteDOC(chip, docptr, CDSNDeviceSelect);
- DoC_Delay(docptr, 4);
-
- /* Wait for it to be ready */
- return DoC_WaitReady(docptr);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
- /* Select the floor (bank) of chips required */
- WriteDOC(floor, docptr, FloorSelect);
-
- /* Wait for the chip to be ready */
- return DoC_WaitReady(docptr);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
-
- /* Page in the required floor/chip
- FIXME: is this supported by Millennium ?? */
- DoC_SelectFloor(doc->virtadr, floor);
- DoC_SelectChip(doc->virtadr, chip);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_WaitReady(doc->virtadr);
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP);
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00);
-
- /* Read the manufacturer and device id codes of the flash device through
- CDSN IO register see Software Requirement 11.4 item 5.*/
- dummy = ReadDOC(doc->virtadr, ReadPipeInit);
- DoC_Delay(doc->virtadr, 2);
- mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO);
-
- DoC_Delay(doc->virtadr, 2);
- id = ReadDOC(doc->virtadr, Mil_CDSN_IO);
- dummy = ReadDOC(doc->virtadr, LastDataRead);
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
-	/* FIXME: deal with the multi-flash on multi-Millennium case more carefully */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if ( id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n",
- mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- break;
- }
- }
-
- if (nand_flash_ids[i].name == NULL)
- return 0;
- else
- return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
- int floor, chip;
- int numchips[MAX_FLOORS_MIL];
- int ret;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) {
- numchips[floor] = 0;
- for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) {
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk("No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips){
- printk("No memory for allocating chip info structures\n");
- return;
- }
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) {
- for (chip = 0 ; chip < numchips[floor] ; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips ,this->totlen >> 20);
-}
-
-static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
-
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, and we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
-	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
- if (tmp2 == (tmp1+1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoCMil_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
-
- /* We must avoid being called twice for the same device. */
- if (docmillist)
- old = docmillist->priv;
-
- while (old) {
- if (DoCMil_is_alias(this, old)) {
- printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at "
- "0x%lX - already configured\n", this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
- mtd->name = "DiskOnChip Millennium";
- printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n",
- this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
-
- /* FIXME: erase size is not always 8KiB */
- mtd->erasesize = 0x2000;
- mtd->writebufsize = mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->ecc_strength = 2;
- mtd->owner = THIS_MODULE;
- mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
- mtd->_read_oob = doc_read_oob;
- mtd->_write_oob = doc_write_oob;
- this->curfloor = -1;
- this->curchip = -1;
-
- /* Ident all the chips present. */
- DoC_ScanChips(this);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = docmillist;
- docmillist = mtd;
- mtd->size = this->totlen;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoCMil_init);
-
-static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- int i, ret;
- volatile char dummy;
- unsigned char syndrome[6], eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
-	/* issue the Read0 or Read1 command depending on which half of the page
-	   we are accessing. Poll the Flash Ready bit after issuing the 3 address
-	   bytes in Sequence Read Mode, see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP);
- DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_EN, docptr, ECCConf);
-
- /* Read the data via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < len-1; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
- }
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
- buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
- /* Let the caller know we completed it */
- *retlen = len;
- ret = 0;
-
- /* Read the ECC data from Spare Data Area,
- see Reed-Solomon EDC/ECC 11.1 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < 5; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5);
-#endif
- eccbuf[5] = ReadDOC(docptr, LastDataRead);
-
- /* Flush the pipeline */
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
-
- /* Check the ECC Status */
- if (ReadDOC(docptr, ECCConf) & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
- /* Read the ECC syndrome through the DiskOnChip ECC logic.
-		   These syndromes will be all ZERO when there is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
- printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the read. Not that
- this can be told to user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by checking *retlen */
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
- return ret;
-}
-
-static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- int i,ret = 0;
- char eccbuf[6];
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
-#if 0
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ( (to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
-#else
- /* Don't allow writes which aren't exactly one block */
- if (to & 0x1ff || len != 0x200)
- return -EINVAL;
-#endif
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0x00);
- DoC_WaitReady(docptr);
- /* Set device to main plane of flash */
- DoC_Command(docptr, NAND_CMD_READ0, 0x00);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(docptr, 3, to, 0x00, 0x00);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
- /* Write the data via the internal pipeline through CDSN IO register,
- see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
- for (i = 0; i < len; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic
- see Reed-Solomon EDC/ECC 11.1 */
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (i = 0; i < 6; i++) {
- eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i);
- }
-
- /* ignore the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
-#ifndef USE_MEMCPY
- /* Write the ECC data to flash */
- for (i = 0; i < 6; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6);
-#endif
-
- /* write the block status BLOCK_USED (0x5555) at the end of ECC data
- FIXME: this is only a hack for programming the IPL area for LinuxBIOS
-	   and should be replaced with proper code in user-space utilities */
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
-
- WriteDOC(0x00, docptr, WritePipeTerm);
-
-#ifdef PSYCHO_DEBUG
- printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error programming flash\n");
- /* Error in programming
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, LastDataRead);
-
- /* Let the caller know we completed it */
- *retlen = len;
-
- return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
- int i;
-#endif
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* issue the Read2 command to set the pointer to the Spare Data Area.
-	   Poll the Flash Ready bit after issuing the 3 address bytes in
- Sequence Read Mode, see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
- DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the data out via the internal pipeline through CDSN IO register,
- see Pipelined Read Operations 11.3 */
- dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
- for (i = 0; i < len-1; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
- buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
- ops->retlen = len;
-
- return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
- int i;
-#endif
- volatile char dummy;
- int ret = 0;
- struct DiskOnChip *this = mtd->priv;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* disable the ECC engine */
- WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_WaitReady(docptr);
- /* issue the Read2 command to set the pointer to the Spare Data Area. */
- DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(docptr, 3, ofs, 0x00, 0x00);
-
- /* Write the data via the internal pipeline through CDSN IO register,
- see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
- for (i = 0; i < len; i++) {
- /* N.B. you have to increase the source address in this way or the
- ECC logic will not work properly */
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
- }
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
- WriteDOC(0x00, docptr, WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error programming oob data\n");
- /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ops->retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, LastDataRead);
-
- ops->retlen = len;
-
- return ret;
-}
-
-int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
-{
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- void __iomem *docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
- if (len != mtd->erasesize)
-		printk(KERN_WARNING "Erase not right size (%x != %x)\n",
- len, mtd->erasesize);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- instr->state = MTD_ERASE_PENDING;
-
- /* issue the Erase Setup command */
- DoC_Command(docptr, NAND_CMD_ERASE1, 0x00);
- DoC_Address(docptr, 2, ofs, 0x00, 0x00);
-
- /* Commit the Erase Start command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_ERASE2, 0x00);
- DoC_WaitReady(docptr);
-
- instr->state = MTD_ERASING;
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.
-	   FIXME: it seems that we do not wait long enough; some blocks are not
-	   fully erased */
- DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, ReadPipeInit);
- DoC_Delay(docptr, 2);
- if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
- printk("Error Erasing at 0x%x\n", ofs);
- /* There was an error
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- instr->state = MTD_ERASE_FAILED;
- } else
- instr->state = MTD_ERASE_DONE;
- dummy = ReadDOC(docptr, LastDataRead);
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd=docmillist)) {
- this = mtd->priv;
- docmillist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2001);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium");
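
DoC_Address() in the Millennium driver above sends an address as up to three
bytes through the Mil_CDSN_IO register: bits 0-7 (the column), then bits 9-16
and 17-23 (the page). Bit 8 is never sent; doc_read() selects it by issuing
Read0 or Read1 for the lower or upper half of the page. A minimal sketch of
that split, with an illustrative helper name not taken from the driver:

	/* Split a flash offset into the three address bytes written by
	 * DoC_Address(); bit 8 is skipped because it is selected by the
	 * Read0/Read1 command instead. */
	static void doc_split_address(unsigned long ofs, unsigned char addr[3])
	{
		addr[0] = ofs & 0xff;		/* bits 0-7   */
		addr[1] = (ofs >> 9) & 0xff;	/* bits 9-16  */
		addr[2] = (ofs >> 17) & 0xff;	/* bits 17-23 */
	}
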
+++ /dev/null
-/*
- * Linux driver for Disk-On-Chip Millennium Plus
- *
- * (c) 2002-2003 Greg Ungerer <gerg@snapgear.com>
- * (c) 2002-2003 SnapGear Inc
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- *
- * Released under GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simply a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:*/
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmilpluslist = NULL;
-
-
-/* Perform the required delay cycles by writing to the NOP register */
-static void DoC_Delay(void __iomem * docptr, int cycles)
-{
- int i;
-
- for (i = 0; (i < cycles); i++)
- WriteDOC(0, docptr, Mplus_NOP);
-}
-
-#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
- unsigned int c = 0xffff;
-
- pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
- /* Out-of-line routine to wait for chip response */
- while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
- ;
-
- if (c == 0)
- pr_debug("_DoC_WaitReady timed out.\n");
-
- return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
- /* This is inline, to optimise the common case, where it's ready instantly */
- int ret = 0;
-
-	/* reads from the NOP register should be issued prior to the read from CDSNControl
- see Software Requirement 11.4 item 2. */
- DoC_Delay(docptr, 4);
-
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
- /* Call the out-of-line routine to wait */
- ret = _DoC_WaitReady(docptr);
-
- return ret;
-}
-
-/* For some reason the Millennium Plus seems to occasionally put itself
- * into reset mode. For me this happens randomly, with no pattern that I
- * can detect. M-Systems suggest always checking this on any block-level
- * operation and setting it back to normal mode if it is in reset mode.
- */
-static inline void DoC_CheckASIC(void __iomem * docptr)
-{
- /* Make sure the DoC is in normal mode */
- if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) {
- WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl);
- WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm);
- }
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the Flash
- * command register. Need 2 Write Pipeline Terminates to complete send.
- */
-static void DoC_Command(void __iomem * docptr, unsigned char command,
- unsigned char xtraflags)
-{
- WriteDOC(command, docptr, Mplus_FlashCmd);
- WriteDOC(command, docptr, Mplus_WritePipeTerm);
- WriteDOC(command, docptr, Mplus_WritePipeTerm);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the Flash
- * Address register. Need 2 Write Pipeline Terminates to complete send.
- */
-static inline void DoC_Address(struct DiskOnChip *doc, int numbytes,
- unsigned long ofs, unsigned char xtraflags1,
- unsigned char xtraflags2)
-{
- void __iomem * docptr = doc->virtadr;
-
- /* Allow for possible Mill Plus internal flash interleaving */
- ofs >>= doc->interleave;
-
- switch (numbytes) {
- case 1:
- /* Send single byte, bits 0-7. */
- WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
- break;
- case 2:
- /* Send bits 9-16 followed by 17-23 */
- WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
- break;
- case 3:
- /* Send 0-7, 9-16, then 17-23 */
- WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
- WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
- break;
- default:
- return;
- }
-
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-}
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
- /* No choice for flash chip on Millennium Plus */
- return 0;
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
- WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect);
- return 0;
-}
-
-/*
- * Translate the given offset into the appropriate command and offset.
- * This does the mapping using the 16bit interleave layout defined by
- * M-Systems, and looks like this for a sector pair:
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055|
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 |
- * +-----------+-------+-------+-------+--------------+---------+-----------+
- */
-/* FIXME: This lives in INFTL not here. Other users of flash devices
- may not want it */
-static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
-{
- struct DiskOnChip *this = mtd->priv;
-
- if (this->interleave) {
- unsigned int ofs = *from & 0x3ff;
- unsigned int cmd;
-
- if (ofs < 512) {
- cmd = NAND_CMD_READ0;
- ofs &= 0x1ff;
- } else if (ofs < 1014) {
- cmd = NAND_CMD_READ1;
- ofs = (ofs & 0x1ff) + 10;
- } else {
- cmd = NAND_CMD_READOOB;
- ofs = ofs - 1014;
- }
-
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
- } else {
- /* No interleave */
- if ((*from) & 0x100)
- return NAND_CMD_READ1;
- return NAND_CMD_READ0;
- }
-}
-
-static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- if (*from & 0x200) {
- cmd = NAND_CMD_READOOB;
- ofs = 10 + (*from & 0xf);
- } else {
- cmd = NAND_CMD_READ1;
- ofs = (*from & 0xf);
- }
-
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- cmd = NAND_CMD_READ1;
- ofs = (*from & 0x200) ? 8 : 6;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from)
-{
- unsigned int ofs, cmd;
-
- cmd = NAND_CMD_READOOB;
- ofs = (*from & 0x200) ? 24 : 16;
- *from = (*from & ~0x3ff) | ofs;
- return cmd;
-}
-
-static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
- int i;
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
-#else
- memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len);
-#endif
-}
-
-static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
- int i;
- for (i = 0; i < len; i++)
- WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
-#else
- memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
- int mfr, id, i, j;
- volatile char dummy;
- void __iomem * docptr = doc->virtadr;
-
- /* Page in the required floor/chip */
- DoC_SelectFloor(docptr, floor);
- DoC_SelectChip(docptr, chip);
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Read the NAND chip ID: 1. Send ReadID command */
- DoC_Command(docptr, NAND_CMD_READID, 0);
-
- /* Read the NAND chip ID: 2. Send address byte zero */
- DoC_Address(doc, 1, 0x00, 0, 0x00);
-
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* Read the manufacturer and device id codes of the flash device through
- CDSN IO register see Software Requirement 11.4 item 5.*/
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-
- mfr = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
- id = ReadDOC(docptr, Mil_CDSN_IO);
- if (doc->interleave)
- dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- /* No response - return failure */
- if (mfr == 0xff || mfr == 0)
- return 0;
-
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (id == nand_flash_ids[i].id) {
- /* Try to identify manufacturer */
- for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
- if (nand_manuf_ids[j].id == mfr)
- break;
- }
- printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s:%s)\n", mfr, id,
- nand_manuf_ids[j].name, nand_flash_ids[i].name);
- doc->mfr = mfr;
- doc->id = id;
- doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
- doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
- break;
- }
- }
-
- if (nand_flash_ids[i].name == NULL)
- return 0;
- return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
- int floor, chip;
- int numchips[MAX_FLOORS_MPLUS];
- int ret;
-
- this->numchips = 0;
- this->mfr = 0;
- this->id = 0;
-
- /* Work out the intended interleave setting */
- this->interleave = 0;
- if (this->ChipID == DOC_ChipID_DocMilPlus32)
- this->interleave = 1;
-
- /* Check the ASIC agrees */
- if ( (this->interleave << 2) !=
- (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
- u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
- printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
- this->interleave?"on (16-bit)":"off (8-bit)");
- conf ^= 4;
- WriteDOC(conf, this->virtadr, Mplus_Configuration);
- }
-
- /* For each floor, find the number of valid chips it contains */
- for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
- numchips[floor] = 0;
- for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) {
- ret = DoC_IdentChip(this, floor, chip);
- if (ret) {
- numchips[floor]++;
- this->numchips++;
- }
- }
- }
- /* If there are none at all that we recognise, bail */
- if (!this->numchips) {
- printk("No flash chips recognised.\n");
- return;
- }
-
- /* Allocate an array to hold the information for each chip */
- this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
- if (!this->chips){
- printk("MTD: No memory for allocating chip info structures\n");
- return;
- }
-
- /* Fill out the chip array with {floor, chipno} for each
- * detected chip in the device. */
- for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) {
- for (chip = 0 ; chip < numchips[floor] ; chip++) {
- this->chips[ret].floor = floor;
- this->chips[ret].chip = chip;
- this->chips[ret].curadr = 0;
- this->chips[ret].curmode = 0x50;
- ret++;
- }
- }
-
- /* Calculate and print the total size of the device */
- this->totlen = this->numchips * (1 << this->chipshift);
- printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
- this->numchips ,this->totlen >> 20);
-}
-
-static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
- int tmp1, tmp2, retval;
-
- if (doc1->physadr == doc2->physadr)
- return 1;
-
- /* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, and we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
- * DoC 2000 (it's in the Millennium docs), but it seems to work. */
- tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
- if (tmp1 != tmp2)
- return 0;
-
- WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution);
- tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
- if (tmp2 == (tmp1+1) % 0xff)
- retval = 1;
- else
- retval = 0;
-
- /* Restore register contents. May not be necessary, but do it just to
- * be safe. */
- WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution);
-
- return retval;
-}
-
-/* This routine is found from the docprobe code by symbol_get(),
- * which will bump the use count of this module. */
-void DoCMilPlus_init(struct mtd_info *mtd)
-{
- struct DiskOnChip *this = mtd->priv;
- struct DiskOnChip *old = NULL;
-
- /* We must avoid being called twice for the same device. */
- if (docmilpluslist)
- old = docmilpluslist->priv;
-
- while (old) {
- if (DoCMilPlus_is_alias(this, old)) {
- printk(KERN_NOTICE "Ignoring DiskOnChip Millennium "
- "Plus at 0x%lX - already configured\n",
- this->physadr);
- iounmap(this->virtadr);
- kfree(mtd);
- return;
- }
- if (old->nextdoc)
- old = old->nextdoc->priv;
- else
- old = NULL;
- }
-
- mtd->name = "DiskOnChip Millennium Plus";
- printk(KERN_NOTICE "DiskOnChip Millennium Plus found at "
- "address 0x%lX\n", this->physadr);
-
- mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
- mtd->writebufsize = mtd->writesize = 512;
- mtd->oobsize = 16;
- mtd->ecc_strength = 2;
- mtd->owner = THIS_MODULE;
- mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
- mtd->_read_oob = doc_read_oob;
- mtd->_write_oob = doc_write_oob;
- this->curfloor = -1;
- this->curchip = -1;
-
- /* Ident all the chips present. */
- DoC_ScanChips(this);
-
- if (!this->totlen) {
- kfree(mtd);
- iounmap(this->virtadr);
- } else {
- this->nextdoc = docmilpluslist;
- docmilpluslist = mtd;
- mtd->size = this->totlen;
- mtd->erasesize = this->erasesize;
- mtd_device_register(mtd, NULL, 0);
- return;
- }
-}
-EXPORT_SYMBOL_GPL(DoCMilPlus_init);
-
-#if 0
-static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
-{
- int i;
- loff_t fofs;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
- unsigned char *bp, buf[1056];
- char c[32];
-
- from &= ~0x3ff;
-
- /* Don't allow read past end of device */
- if (from >= this->totlen)
- return -EINVAL;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- fofs = from;
- DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- /* Read the data via the internal pipeline through CDSN IO
- register, see Pipelined Read Operations 11.3 */
- MemReadDOC(docptr, buf, 1054);
- buf[1054] = ReadDOC(docptr, Mplus_LastDataRead);
- buf[1055] = ReadDOC(docptr, Mplus_LastDataRead);
-
- memset(&c[0], 0, sizeof(c));
- printk("DUMP OFFSET=%x:\n", (int)from);
-
- for (i = 0, bp = &buf[0]; (i < 1056); i++) {
- if ((i % 16) == 0)
- printk("%08x: ", i);
- printk(" %02x", *bp);
- c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.';
- bp++;
- if (((i + 1) % 16) == 0)
- printk(" %s\n", c);
- }
- printk("\n");
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- return 0;
-}
-#endif
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- int ret, i;
- volatile char dummy;
- loff_t fofs;
- unsigned char syndrome[6], eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- fofs = from;
- DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
-
- /* Let the caller know we completed it */
- *retlen = len;
- ret = 0;
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
-
- /* Read the data via the internal pipeline through CDSN IO
- register, see Pipelined Read Operations 11.3 */
- MemReadDOC(docptr, buf, len);
-
- /* Read the ECC data following raw data */
- MemReadDOC(docptr, eccbuf, 4);
- eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
- eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Flush the pipeline */
- dummy = ReadDOC(docptr, Mplus_ECCConf);
- dummy = ReadDOC(docptr, Mplus_ECCConf);
-
- /* Check the ECC Status */
- if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
- int nb_errors;
- /* There was an ECC error */
-#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
- /* Read the ECC syndrome through the DiskOnChip ECC logic.
- These syndromes will be all ZERO when there is no error */
- for (i = 0; i < 6; i++)
- syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
- nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
- printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the
- read. Not that this can be told to user-space, via
- sys_read(), but at least MTD-aware stuff can know
- about it by checking *retlen */
-#ifdef ECC_DEBUG
- printk("%s(%d): Millennium Plus ECC error (from=0x%x):\n",
- __FILE__, __LINE__, (int)from);
- printk(" syndrome= %*phC\n", 6, syndrome);
- printk(" eccbuf= %*phC\n", 6, eccbuf);
-#endif
- ret = -EIO;
- }
- }
-
-#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf);
-#endif
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- int i, before, ret = 0;
- loff_t fto;
- volatile char dummy;
- char eccbuf[6];
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
- /* Don't allow writes which aren't exactly one block (512 bytes) */
- if ((to & 0x1ff) || (len != 0x200))
- return -EINVAL;
-
- /* Determine position of OOB flags, before or after data */
- before = (this->interleave && (to & 0x200));
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Set device to appropriate plane of flash */
- fto = to;
- WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
-
- /* On interleaved devices the flags for 2nd half 512 are before data */
- if (before)
- fto -= 2;
-
- /* issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(this, 3, fto, 0x00, 0x00);
-
- /* Disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- if (before) {
- /* Write the block status BLOCK_USED (0x5555) */
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- WriteDOC(0x55, docptr, Mil_CDSN_IO);
- }
-
- /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
-
- MemWriteDOC(docptr, (unsigned char *) buf, len);
-
- /* Write ECC data to flash, the ECC info is generated by
- the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */
- DoC_Delay(docptr, 3);
-
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (i = 0; i < 6; i++)
- eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-
- /* Write the ECC data to flash */
- MemWriteDOC(docptr, eccbuf, 6);
-
- if (!before) {
- /* Write the block status BLOCK_USED (0x5555) */
- WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
- WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
- }
-
-#ifdef PSYCHO_DEBUG
- printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
-
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- DoC_Delay(docptr, 2);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
- /* Error in programming
- FIXME: implement Bad Block Replacement (in nftl.c ??) */
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- /* Let the caller know we completed it */
- *retlen = len;
-
- return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- loff_t fofs, base;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- size_t i, size, got, want;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
- DoC_WaitReady(docptr);
-
- /* Maximum of 16 bytes in the OOB region, so limit read to that */
- if (len > 16)
- len = 16;
- got = 0;
- want = len;
-
- for (i = 0; ((i < 3) && (want > 0)); i++) {
- /* Figure out which region we are accessing... */
- fofs = ofs;
- base = ofs & 0xf;
- if (!this->interleave) {
- DoC_Command(docptr, NAND_CMD_READOOB, 0);
- size = 16 - base;
- } else if (base < 6) {
- DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
- size = 6 - base;
- } else if (base < 8) {
- DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0);
- size = 8 - base;
- } else {
- DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0);
- size = 16 - base;
- }
- if (size > want)
- size = want;
-
- /* Issue read command */
- DoC_Address(this, 3, fofs, 0, 0x00);
- WriteDOC(0, docptr, Mplus_FlashControl);
- DoC_WaitReady(docptr);
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
- MemReadDOC(docptr, &buf[got], size - 2);
- buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead);
- buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead);
-
- ofs += size;
- got += size;
- want -= size;
- }
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- ops->retlen = len;
- return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
- struct mtd_oob_ops *ops)
-{
- volatile char dummy;
- loff_t fofs, base;
- struct DiskOnChip *this = mtd->priv;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- size_t i, size, got, want;
- int ret = 0;
- uint8_t *buf = ops->oobbuf;
- size_t len = ops->len;
-
- BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
- ofs += ops->ooboffs;
-
- DoC_CheckASIC(docptr);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
-
- /* Maximum of 16 bytes in the OOB region, so limit write to that */
- if (len > 16)
- len = 16;
- got = 0;
- want = len;
-
- for (i = 0; ((i < 3) && (want > 0)); i++) {
- /* Reset the chip, see Software Requirement 11.4 item 1. */
- DoC_Command(docptr, NAND_CMD_RESET, 0);
- DoC_WaitReady(docptr);
-
- /* Figure out which region we are accessing... */
- fofs = ofs;
- base = ofs & 0x0f;
- if (!this->interleave) {
- WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
- size = 16 - base;
- } else if (base < 6) {
- WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 6 - base;
- } else if (base < 8) {
- WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 8 - base;
- } else {
- WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
- size = 16 - base;
- }
- if (size > want)
- size = want;
-
- /* Issue the Serial Data In command to initiate the Page Program process */
- DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
- DoC_Address(this, 3, fofs, 0, 0x00);
-
- /* Disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
- /* Write the data via the internal pipeline through CDSN IO
- register, see Pipelined Write Operations 11.2 */
- MemWriteDOC(docptr, (unsigned char *) &buf[got], size);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
- /* Commit the Page Program command and wait for ready
- see Software Requirement 11.4 item 1.*/
- DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
- DoC_WaitReady(docptr);
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5.*/
- DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- DoC_Delay(docptr, 2);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x programming oob at 0x%x\n",
- dummy, (int)ofs);
- /* FIXME: implement Bad Block Replacement */
- ops->retlen = 0;
- ret = -EIO;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- ofs += size;
- got += size;
- want -= size;
- }
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- ops->retlen = len;
- return ret;
-}
-
-int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
- volatile char dummy;
- struct DiskOnChip *this = mtd->priv;
- __u32 ofs = instr->addr;
- __u32 len = instr->len;
- void __iomem * docptr = this->virtadr;
- struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
- DoC_CheckASIC(docptr);
-
- if (len != mtd->erasesize)
- printk(KERN_WARNING "MTD: Erase not right size (%x != %x)\n",
- len, mtd->erasesize);
-
- /* Find the chip which is to be used and select it */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(docptr, mychip->floor);
- DoC_SelectChip(docptr, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(docptr, mychip->chip);
- }
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
-
- instr->state = MTD_ERASE_PENDING;
-
- /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
- WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
- DoC_Command(docptr, NAND_CMD_RESET, 0x00);
- DoC_WaitReady(docptr);
-
- DoC_Command(docptr, NAND_CMD_ERASE1, 0);
- DoC_Address(this, 2, ofs, 0, 0x00);
- DoC_Command(docptr, NAND_CMD_ERASE2, 0);
- DoC_WaitReady(docptr);
- instr->state = MTD_ERASING;
-
- /* Read the status of the flash device through CDSN IO register
- see Software Requirement 11.4 item 5. */
- DoC_Command(docptr, NAND_CMD_STATUS, 0);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
- if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
- printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs);
- /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
- instr->state = MTD_ERASE_FAILED;
- } else {
- instr->state = MTD_ERASE_DONE;
- }
- dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
-
- mtd_erase_callback(instr);
-
- return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001plus(void)
-{
- struct mtd_info *mtd;
- struct DiskOnChip *this;
-
- while ((mtd=docmilpluslist)) {
- this = mtd->priv;
- docmilpluslist = this->nextdoc;
-
- mtd_device_unregister(mtd);
-
- iounmap(this->virtadr);
- kfree(this->chips);
- kfree(mtd);
- }
-}
-
-module_exit(cleanup_doc2001plus);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
-MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus");
+++ /dev/null
-/*
- * ECC algorithm for M-systems disk on chip. We use the excellent Reed
- * Solomon code of Phil Karn (karn@ka9q.ampr.org) available under the
- * GNU GPL License. The rest is simply to convert the disk on chip
- * syndrome into a standard syndrome.
- *
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/doc2000.h>
-
-#define DEBUG_ECC 0
-/* need to undef it (from asm/termbits.h) */
-#undef B0
-
-#define MM 10 /* Symbol size in bits */
-#define KK (1023-4) /* Number of data symbols per block */
-#define B0 510 /* First root of generator polynomial, alpha form */
-#define PRIM 1 /* power of alpha used to generate roots of generator poly */
-#define NN ((1 << MM) - 1)
-
-typedef unsigned short dtype;
-
-/* 1+x^3+x^10 */
-static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 };
-
-/* This defines the type used to store an element of the Galois Field
- * used by the code. Make sure this is something larger than a char
- * if anything larger than GF(256) is used.
- *
- * Note: unsigned char will work up to GF(256) but int seems to run
- * faster on the Pentium.
- */
-typedef int gf;
-
-/* No legal value in index form represents zero, so
- * we need a special value for this purpose
- */
-#define A0 (NN)
-
-/* Compute x % NN, where NN is 2**MM - 1,
- * without a slow divide: since 2**MM == NN + 1, adding the high bits
- * (x >> MM) back into the low MM bits preserves the value modulo NN
- */
-static inline gf
-modnn(int x)
-{
- while (x >= NN) {
- x -= NN;
- x = (x >> MM) + (x & NN);
- }
- return x;
-}
-
-#define CLEAR(a,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = 0;\
-}
-
-#define COPY(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define COPYDOWN(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define Ldec 1
-
-/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m]
- lookup tables: index->polynomial form alpha_to[] contains j=alpha**i;
- polynomial form -> index form index_of[j=alpha**i] = i
- alpha=2 is the primitive element of GF(2**m)
- HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows:
- Let @ represent the primitive element commonly called "alpha" that
- is the root of the primitive polynomial p(x). Then in GF(2^m), for any
- 0 <= i <= 2^m-2,
- @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
- where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation
- of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for
- example the polynomial representation of @^5 would be given by the binary
- representation of the integer "alpha_to[5]".
- Similarly, index_of[] can be used as follows:
- As above, let @ represent the primitive element of GF(2^m) that is
- the root of the primitive polynomial p(x). In order to find the power
- of @ (alpha) that has the polynomial representation
- a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
- we consider the integer "i" whose binary representation with a(0) being LSB
- and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
- "index_of[i]". Now, @^index_of[i] is that element whose polynomial
- representation is (a(0),a(1),a(2),...,a(m-1)).
- NOTE:
- The element alpha_to[2^m-1] = 0 always signifying that the
- representation of "@^infinity" = 0 is (0,0,0,...,0).
- Similarly, the element index_of[0] = A0 always signifying
- that the power of alpha which has the polynomial representation
- (0,0,...,0) is "infinity".
-
-*/
-
-static void
-generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
-{
- register int i, mask;
-
- mask = 1;
- Alpha_to[MM] = 0;
- for (i = 0; i < MM; i++) {
- Alpha_to[i] = mask;
- Index_of[Alpha_to[i]] = i;
- /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */
- if (Pp[i] != 0)
- Alpha_to[MM] ^= mask; /* Bit-wise EXOR operation */
- mask <<= 1; /* single left-shift */
- }
- Index_of[Alpha_to[MM]] = MM;
- /*
- * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by
- * poly-repr of @^i shifted left one-bit and accounting for any @^MM
- * term that may occur when poly-repr of @^i is shifted.
- */
- mask >>= 1;
- for (i = MM + 1; i < NN; i++) {
- if (Alpha_to[i - 1] >= mask)
- Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1);
- else
- Alpha_to[i] = Alpha_to[i - 1] << 1;
- Index_of[Alpha_to[i]] = i;
- }
- Index_of[0] = A0;
- Alpha_to[NN] = 0;
-}
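
The tables built by generate_gf() are the usual log/antilog pair for GF(2**MM): multiplying two non-zero field elements amounts to adding their indices modulo NN. A minimal sketch of how such tables are used (illustrative only; gf_mul is a hypothetical helper, not something this driver defines):

	static gf gf_mul(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], gf a, gf b)
	{
		/* zero has no index form; Index_of[0] holds the sentinel A0 */
		if (a == 0 || b == 0)
			return 0;
		/* a * b = alpha^(log a + log b), reduced mod NN by modnn() */
		return Alpha_to[modnn(Index_of[a] + Index_of[b])];
	}
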
-
-/*
- * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content
- * of the feedback shift register after having processed the data and
- * the ECC.
- *
- * Return number of symbols corrected, or -1 if codeword is illegal
- * or uncorrectable. If eras_pos is non-null, the detected error locations
- * are written back. NOTE! This array must be at least NN-KK elements long.
- * The corrected data are written in eras_val[]. They must be XORed with the data
- * to retrieve the correct data: data[eras_pos[i]] ^= eras_val[i].
- *
- * First "no_eras" erasures are declared by the calling program. Then, the
- * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
- * If the number of channel errors is not greater than "t_after_eras" the
- * transmitted codeword will be recovered. Details of algorithm can be found
- * in R. Blahut's "Theory ... of Error-Correcting Codes".
-
- * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure
- * will result. The decoder *could* check for this condition, but it would involve
- * extra time on every decoding operation.
- * */
-static int
-eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
- gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
- int no_eras)
-{
- int deg_lambda, el, deg_omega;
- int i, j, r,k;
- gf u,q,tmp,num1,num2,den,discr_r;
- gf lambda[NN-KK + 1], s[NN-KK + 1]; /* Err+Eras Locator poly
- * and syndrome poly */
- gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1];
- gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK];
- int syn_error, count;
-
- syn_error = 0;
- for(i=0;i<NN-KK;i++)
- syn_error |= bb[i];
-
- if (!syn_error) {
- /* if remainder is zero, data[] is a codeword and there are no
- * errors to correct. So return data[] unmodified
- */
- count = 0;
- goto finish;
- }
-
- for(i=1;i<=NN-KK;i++){
- s[i] = bb[0];
- }
- for(j=1;j<NN-KK;j++){
- if(bb[j] == 0)
- continue;
- tmp = Index_of[bb[j]];
-
- for(i=1;i<=NN-KK;i++)
- s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
- }
-
- /* undo the feedback register implicit multiplication and convert
- syndromes to index form */
-
- for(i=1;i<=NN-KK;i++) {
- tmp = Index_of[s[i]];
- if (tmp != A0)
- tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
- s[i] = tmp;
- }
-
- CLEAR(&lambda[1],NN-KK);
- lambda[0] = 1;
-
- if (no_eras > 0) {
- /* Init lambda to be the erasure locator polynomial */
- lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])];
- for (i = 1; i < no_eras; i++) {
- u = modnn(PRIM*eras_pos[i]);
- for (j = i+1; j > 0; j--) {
- tmp = Index_of[lambda[j - 1]];
- if(tmp != A0)
- lambda[j] ^= Alpha_to[modnn(u + tmp)];
- }
- }
-#if DEBUG_ECC >= 1
- /* Test code that verifies the erasure locator polynomial just constructed
- Needed only for decoder debugging. */
-
- /* find roots of the erasure location polynomial */
- for(i=1;i<=no_eras;i++)
- reg[i] = Index_of[lambda[i]];
- count = 0;
- for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
- q = 1;
- for (j = 1; j <= no_eras; j++)
- if (reg[j] != A0) {
- reg[j] = modnn(reg[j] + j);
- q ^= Alpha_to[reg[j]];
- }
- if (q != 0)
- continue;
- /* store root and error location number indices */
- root[count] = i;
- loc[count] = k;
- count++;
- }
- if (count != no_eras) {
- printf("\n lambda(x) is WRONG\n");
- count = -1;
- goto finish;
- }
-#if DEBUG_ECC >= 2
- printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
- for (i = 0; i < count; i++)
- printf("%d ", loc[i]);
- printf("\n");
-#endif
-#endif
- }
- for(i=0;i<NN-KK+1;i++)
- b[i] = Index_of[lambda[i]];
-
- /*
- * Begin Berlekamp-Massey algorithm to determine error+erasure
- * locator polynomial
- */
- r = no_eras;
- el = no_eras;
- while (++r <= NN-KK) { /* r is the step number */
- /* Compute discrepancy at the r-th step in poly-form */
- discr_r = 0;
- for (i = 0; i < r; i++){
- if ((lambda[i] != 0) && (s[r - i] != A0)) {
- discr_r ^= Alpha_to[modnn(Index_of[lambda[i]] + s[r - i])];
- }
- }
- discr_r = Index_of[discr_r]; /* Index form */
- if (discr_r == A0) {
- /* 2 lines below: B(x) <-- x*B(x) */
- COPYDOWN(&b[1],b,NN-KK);
- b[0] = A0;
- } else {
- /* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
- t[0] = lambda[0];
- for (i = 0 ; i < NN-KK; i++) {
- if(b[i] != A0)
- t[i+1] = lambda[i+1] ^ Alpha_to[modnn(discr_r + b[i])];
- else
- t[i+1] = lambda[i+1];
- }
- if (2 * el <= r + no_eras - 1) {
- el = r + no_eras - el;
- /*
- * 2 lines below: B(x) <-- inv(discr_r) *
- * lambda(x)
- */
- for (i = 0; i <= NN-KK; i++)
- b[i] = (lambda[i] == 0) ? A0 : modnn(Index_of[lambda[i]] - discr_r + NN);
- } else {
- /* 2 lines below: B(x) <-- x*B(x) */
- COPYDOWN(&b[1],b,NN-KK);
- b[0] = A0;
- }
- COPY(lambda,t,NN-KK+1);
- }
- }
-
- /* Convert lambda to index form and compute deg(lambda(x)) */
- deg_lambda = 0;
- for(i=0;i<NN-KK+1;i++){
- lambda[i] = Index_of[lambda[i]];
- if(lambda[i] != A0)
- deg_lambda = i;
- }
- /*
- * Find roots of the error+erasure locator polynomial by Chien
- * Search
- */
- COPY(&reg[1],&lambda[1],NN-KK);
- count = 0; /* Number of roots of lambda(x) */
- for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
- q = 1;
- for (j = deg_lambda; j > 0; j--){
- if (reg[j] != A0) {
- reg[j] = modnn(reg[j] + j);
- q ^= Alpha_to[reg[j]];
- }
- }
- if (q != 0)
- continue;
- /* store root (index-form) and error location number */
- root[count] = i;
- loc[count] = k;
- /* If we've already found max possible roots,
- * abort the search to save time
- */
- if(++count == deg_lambda)
- break;
- }
- if (deg_lambda != count) {
- /*
- * deg(lambda) unequal to number of roots => uncorrectable
- * error detected
- */
- count = -1;
- goto finish;
- }
- /*
- * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
- * x**(NN-KK)). in index form. Also find deg(omega).
- */
- deg_omega = 0;
- for (i = 0; i < NN-KK;i++){
- tmp = 0;
- j = (deg_lambda < i) ? deg_lambda : i;
- for(;j >= 0; j--){
- if ((s[i + 1 - j] != A0) && (lambda[j] != A0))
- tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])];
- }
- if(tmp != 0)
- deg_omega = i;
- omega[i] = Index_of[tmp];
- }
- omega[NN-KK] = A0;
-
- /*
- * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
- * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
- */
- for (j = count-1; j >=0; j--) {
- num1 = 0;
- for (i = deg_omega; i >= 0; i--) {
- if (omega[i] != A0)
- num1 ^= Alpha_to[modnn(omega[i] + i * root[j])];
- }
- num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
- den = 0;
-
- /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
- for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
- if(lambda[i+1] != A0)
- den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
- }
- if (den == 0) {
-#if DEBUG_ECC >= 1
- printf("\n ERROR: denominator = 0\n");
-#endif
- /* Convert to dual- basis */
- count = -1;
- goto finish;
- }
- /* Apply error to data */
- if (num1 != 0) {
- eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])];
- } else {
- eras_val[j] = 0;
- }
- }
- finish:
- for(i=0;i<count;i++)
- eras_pos[i] = loc[i];
- return count;
-}
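
As the header comment above eras_dec_rs() notes, the function returns correction values and positions rather than patching the data itself. A caller holding an unpacked symbol buffer would apply them roughly like this (illustrative sketch only, with data[] standing for the caller's own symbol buffer; doc_decode_ecc() below performs the equivalent fix-up on the bit-packed sector bytes):

	nb_errors = eras_dec_rs(Alpha_to, Index_of, bb, eras_val, eras_pos, 0);
	if (nb_errors > 0) {
		for (i = 0; i < nb_errors; i++)
			data[eras_pos[i]] ^= eras_val[i]; /* XOR in the correction */
	}
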
-
-/***************************************************************************/
-/* The DOC specific code begins here */
-
-#define SECTOR_SIZE 512
-/* The sector bytes are packed into NB_DATA MM-bit words
-   (for SECTOR_SIZE 512 and MM 10: ((512 + 1) * 8 + 6) / 10 = 411 symbols) */
-#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)
-
-/*
- * Correct the errors in 'sector[]' by using 'ecc1[]' which is the
- * content of the feedback shift register applied to the sector and
- * the ECC. Return the number of errors corrected (and correct them in
- * sector), or -1 on error.
- */
-int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
-{
- int parity, i, nb_errors;
- gf bb[NN - KK + 1];
- gf error_val[NN-KK];
- int error_pos[NN-KK], pos, bitpos, index, val;
- dtype *Alpha_to, *Index_of;
-
- /* init log and exp tables here to save memory. However, it is slower */
- Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
- if (!Alpha_to)
- return -1;
-
- Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
- if (!Index_of) {
- kfree(Alpha_to);
- return -1;
- }
-
- generate_gf(Alpha_to, Index_of);
-
- parity = ecc1[1];
-
- bb[0] = (ecc1[4] & 0xff) | ((ecc1[5] & 0x03) << 8);
- bb[1] = ((ecc1[5] & 0xfc) >> 2) | ((ecc1[2] & 0x0f) << 6);
- bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
- bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);
-
- nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
- error_val, error_pos, 0);
- if (nb_errors <= 0)
- goto the_end;
-
- /* correct the errors */
- for(i=0;i<nb_errors;i++) {
- pos = error_pos[i];
- if (pos >= NB_DATA && pos < KK) {
- nb_errors = -1;
- goto the_end;
- }
- if (pos < NB_DATA) {
- /* extract bit position (MSB first) */
- pos = 10 * (NB_DATA - 1 - pos) - 6;
- /* now correct the following 10 bits. At most two bytes
- can be modified since pos is even */
- index = (pos >> 3) ^ 1;
- bitpos = pos & 7;
- if ((index >= 0 && index < SECTOR_SIZE) ||
- index == (SECTOR_SIZE + 1)) {
- val = error_val[i] >> (2 + bitpos);
- parity ^= val;
- if (index < SECTOR_SIZE)
- sector[index] ^= val;
- }
- index = ((pos >> 3) + 1) ^ 1;
- bitpos = (bitpos + 10) & 7;
- if (bitpos == 0)
- bitpos = 8;
- if ((index >= 0 && index < SECTOR_SIZE) ||
- index == (SECTOR_SIZE + 1)) {
- val = error_val[i] << (8 - bitpos);
- parity ^= val;
- if (index < SECTOR_SIZE)
- sector[index] ^= val;
- }
- }
- }
-
- /* use parity to test extra errors */
- if ((parity & 0xff) != 0)
- nb_errors = -1;
-
- the_end:
- kfree(Alpha_to);
- kfree(Index_of);
- return nb_errors;
-}
-
-EXPORT_SYMBOL_GPL(doc_decode_ecc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
-MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
doc_writeb(docg3, addr, DOC_FLASHADDRESS);
}
-static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
static int doc_register_readb(struct docg3 *docg3, int reg)
{
.remove = __exit_p(docg3_release),
};
-static int __init docg3_init(void)
-{
- return platform_driver_probe(&g3_driver, docg3_probe);
-}
-module_init(docg3_init);
-
-
-static void __exit docg3_exit(void)
-{
- platform_driver_unregister(&g3_driver);
-}
-module_exit(docg3_exit);
+module_platform_driver_probe(g3_driver, docg3_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+++ /dev/null
-
-/* Linux driver for Disk-On-Chip devices */
-/* Probe routines common to all DoC devices */
-/* (C) 1999 Machine Vision Holdings, Inc. */
-/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
-
-
-/* DOC_PASSIVE_PROBE:
- In order to ensure that the BIOS checksum is correct at boot time, and
- hence that the onboard BIOS extension gets executed, the DiskOnChip
- goes into reset mode when it is read sequentially: all registers
- return 0xff until the chip is woken up again by writing to the
- DOCControl register.
-
- Unfortunately, this means that the probe for the DiskOnChip is unsafe,
- because one of the first things it does is write to where it thinks
- the DOCControl register should be - which may well be shared memory
- for another device. I've had machines which lock up when this is
- attempted. Hence the possibility to do a passive probe, which will fail
- to detect a chip in reset mode, but is at least guaranteed not to lock
- the machine.
-
- If you have this problem, uncomment the following line:
-#define DOC_PASSIVE_PROBE
-*/
-
-
-/* DOC_SINGLE_DRIVER:
- Millennium driver has been merged into DOC2000 driver.
-
- The old Millennium-only driver has been retained just in case there
- are problems with the new code. If the combined driver doesn't work
- for you, you can try the old one by undefining DOC_SINGLE_DRIVER
- below and also enabling it in your configuration. If this fixes the
- problems, please send a report to the MTD mailing list at
- <linux-mtd@lists.infradead.org>.
-*/
-#define DOC_SINGLE_DRIVER
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-
-static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
-module_param(doc_config_location, ulong, 0);
-MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
-
-static unsigned long __initdata doc_locations[] = {
-#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_DOCPROBE_HIGH
- 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
- 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
- 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
- 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
- 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /* CONFIG_MTD_DOCPROBE_HIGH */
- 0xc8000, 0xca000, 0xcc000, 0xce000,
- 0xd0000, 0xd2000, 0xd4000, 0xd6000,
- 0xd8000, 0xda000, 0xdc000, 0xde000,
- 0xe0000, 0xe2000, 0xe4000, 0xe6000,
- 0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#endif
- 0xffffffff };
-
-/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */
-
-static inline int __init doccheck(void __iomem *potential, unsigned long physadr)
-{
- void __iomem *window=potential;
- unsigned char tmp, tmpb, tmpc, ChipID;
-#ifndef DOC_PASSIVE_PROBE
- unsigned char tmp2;
-#endif
-
- /* Routine copied from the Linux DOC driver */
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- /* Check for 0x55 0xAA signature at beginning of window,
- this is no longer true once we remove the IPL (for Millennium) */
- if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa)
- return 0;
-#endif /* CONFIG_MTD_DOCPROBE_55AA */
-
-#ifndef DOC_PASSIVE_PROBE
- /* It's not possible to cleanly detect the DiskOnChip - the
- * bootup procedure will put the device into reset mode, and
- * it's not possible to talk to it without actually writing
- * to the DOCControl register. So we store the current contents
- * of the DOCControl register's location, in case we later decide
- * that it's not a DiskOnChip, and want to put it back how we
- * found it.
- */
- tmp2 = ReadDOC(window, DOCControl);
-
- /* Reset the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
- window, DOCControl);
-
- /* Enable the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
- window, DOCControl);
-#endif /* !DOC_PASSIVE_PROBE */
-
- /* We need to read the ChipID register four times. For some
- newer DiskOnChip 2000 units, the first three reads will
- return the DiskOnChip Millennium ident. Don't ask. */
- ChipID = ReadDOC(window, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_Doc2k:
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- break;
-
- case DOC_ChipID_DocMil:
- /* Check for the new 2000 with Millennium ASIC */
- ReadDOC(window, ChipID);
- ReadDOC(window, ChipID);
- if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
- ChipID = DOC_ChipID_Doc2kTSOP;
-
- /* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- break;
-
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- case 0:
- /* Possible Millennium+, need to do more checks */
-#ifndef DOC_PASSIVE_PROBE
- /* Possibly release from power down mode */
- for (tmp = 0; (tmp < 4); tmp++)
- ReadDOC(window, Mplus_Power);
-
- /* Reset the DiskOnChip ASIC */
- tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, window, Mplus_DOCControl);
- WriteDOC(~tmp, window, Mplus_CtrlConfirm);
-
- mdelay(1);
- /* Enable the DiskOnChip ASIC */
- tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
- DOC_MODE_BDECT;
- WriteDOC(tmp, window, Mplus_DOCControl);
- WriteDOC(~tmp, window, Mplus_CtrlConfirm);
- mdelay(1);
-#endif /* !DOC_PASSIVE_PROBE */
-
- ChipID = ReadDOC(window, ChipID);
-
- switch (ChipID) {
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- /* Check the TOGGLE bit in the toggle register */
- tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
- if (tmp != tmpb && tmp == tmpc)
- return ChipID;
- default:
- break;
- }
- /* FALL THRU */
-
- default:
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
- printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
- ChipID, physadr);
-#endif
-#ifndef DOC_PASSIVE_PROBE
- /* Put back the contents of the DOCControl register, in case it's not
- * actually a DiskOnChip.
- */
- WriteDOC(tmp2, window, DOCControl);
-#endif
- return 0;
- }
-
- printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n");
-
-#ifndef DOC_PASSIVE_PROBE
- /* Put back the contents of the DOCControl register: it's not a DiskOnChip */
- WriteDOC(tmp2, window, DOCControl);
-#endif
- return 0;
-}
-
-static int docfound;
-
-extern void DoC2k_init(struct mtd_info *);
-extern void DoCMil_init(struct mtd_info *);
-extern void DoCMilPlus_init(struct mtd_info *);
-
-static void __init DoC_Probe(unsigned long physadr)
-{
- void __iomem *docptr;
- struct DiskOnChip *this;
- struct mtd_info *mtd;
- int ChipID;
- char namebuf[15];
- char *name = namebuf;
- void (*initroutine)(struct mtd_info *) = NULL;
-
- docptr = ioremap(physadr, DOC_IOREMAP_LEN);
-
- if (!docptr)
- return;
-
- if ((ChipID = doccheck(docptr, physadr))) {
- if (ChipID == DOC_ChipID_Doc2kTSOP) {
- /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
- printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
- iounmap(docptr);
- return;
- }
- docfound = 1;
- mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
- iounmap(docptr);
- return;
- }
-
- this = (struct DiskOnChip *)(&mtd[1]);
- mtd->priv = this;
- this->virtadr = docptr;
- this->physadr = physadr;
- this->ChipID = ChipID;
- sprintf(namebuf, "with ChipID %2.2X", ChipID);
-
- switch(ChipID) {
- case DOC_ChipID_Doc2kTSOP:
- name="2000 TSOP";
- initroutine = symbol_request(DoC2k_init);
- break;
-
- case DOC_ChipID_Doc2k:
- name="2000";
- initroutine = symbol_request(DoC2k_init);
- break;
-
- case DOC_ChipID_DocMil:
- name="Millennium";
-#ifdef DOC_SINGLE_DRIVER
- initroutine = symbol_request(DoC2k_init);
-#else
- initroutine = symbol_request(DoCMil_init);
-#endif /* DOC_SINGLE_DRIVER */
- break;
-
- case DOC_ChipID_DocMilPlus16:
- case DOC_ChipID_DocMilPlus32:
- name="MillenniumPlus";
- initroutine = symbol_request(DoCMilPlus_init);
- break;
- }
-
- if (initroutine) {
- (*initroutine)(mtd);
- symbol_put_addr(initroutine);
- return;
- }
- printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
- kfree(mtd);
- }
- iounmap(docptr);
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static int __init init_doc(void)
-{
- int i;
-
- if (doc_config_location) {
- printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
- DoC_Probe(doc_config_location);
- } else {
- for (i=0; (doc_locations[i] != 0xffffffff); i++) {
- DoC_Probe(doc_locations[i]);
- }
- }
- /* No banner message any more. Print a message if no DiskOnChip
- found, so the user knows we at least tried. */
- if (!docfound)
- printk(KERN_INFO "No recognised DiskOnChip devices found\n");
- return -EAGAIN;
-}
-
-module_init(init_doc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices");
-
* @dev: ELM device
* @bch_type: Type of BCH ecc
*/
-void elm_config(struct device *dev, enum bch_ecc bch_type)
+int elm_config(struct device *dev, enum bch_ecc bch_type)
{
u32 reg_val;
struct elm_info *info = dev_get_drvdata(dev);
+ if (!info) {
+ dev_err(dev, "Unable to configure elm - device not probed?\n");
+ return -ENODEV;
+ }
+
reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
info->bch_type = bch_type;
+
+ return 0;
}
EXPORT_SYMBOL(elm_config);
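
With elm_config() now returning an error code, its callers are expected to check it before relying on the ELM block. A minimal sketch of a caller, assuming the BCH8_ECC constant from the ELM platform data header and an illustrative elm_dev pointer (neither appears in this hunk):

	err = elm_config(elm_dev, BCH8_ECC);
	if (err < 0)
		return err; /* ELM device not probed yet or unavailable */
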
u16 flags;
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
#define M25P_NO_ERASE 0x02 /* No erase command needed */
+#define SST_WRITE 0x04 /* use SST byte programming */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
- { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
/* Macronix */
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
/* Micron */
+ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
- { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
- { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
- { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
- { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
- { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
- { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
- { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
}
/* sst flash chips use AAI word program */
- if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
+ if (info->flags & SST_WRITE)
flash->mtd._write = sst_write;
else
flash->mtd._write = m25p80_write;
{ .compatible = "atmel,dataflash", },
{ /* sentinel */ }
};
-#else
-#define dataflash_dt_ids NULL
#endif
/* ......................................................................... */
.driver = {
.name = "mtd_dataflash",
.owner = THIS_MODULE,
- .of_match_table = dataflash_dt_ids,
+ .of_match_table = of_match_ptr(dataflash_dt_ids),
},
.probe = dataflash_probe,
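
For reference, the of_match_ptr() helper used above already degrades to NULL when CONFIG_OF is disabled, which is why the explicit "#define dataflash_dt_ids NULL" fallback can be dropped. A sketch of the definition as found in <linux/of.h> (from memory, so treat it as an approximation):

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif
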
help
Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
-config MTD_DILNETPC
- tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CFI_INTELEXT && BROKEN
- help
- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
- For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
- and <http://www.ssv-embedded.de/ssv/pc104/p170.htm>
-
-config MTD_DILNETPC_BOOTSIZE
- hex "Size of DIL/Net PC flash boot partition"
- depends on MTD_DILNETPC
- default "0x80000"
- help
- The amount of space taken up by the kernel or Etherboot
- on the DIL/Net PC flash chips.
-
config MTD_L440GX
tristate "BIOS flash chip on Intel L440GX boards"
depends on X86 && MTD_JEDECPROBE
BE VERY CAREFUL.
-config MTD_TQM8XXL
- tristate "CFI Flash device mapped on TQM8XXL"
- depends on MTD_CFI && TQM8xxL
- help
- chips, currently AMD ones. This 'mapping' driver supports
- chips, currently uses AMD one. This 'mapping' driver supports
- that arrangement, allowing the CFI probe and command set driver
- code to communicate with the chips on the TQM8xxL board. More at
- <http://www.denx.de/wiki/PPCEmbedded/>.
-
-config MTD_RPXLITE
- tristate "CFI Flash device mapped on RPX Lite or CLLF"
- depends on MTD_CFI && (RPXCLASSIC || RPXLITE)
- help
- The RPXLite PowerPC board has CFI-compliant chips mapped in
- a strange sparse mapping. This 'mapping' driver supports that
- arrangement, allowing the CFI probe and command set driver code
- to communicate with the chips on the RPXLite board. More at
- <http://www.embeddedplanet.com/>.
-
-config MTD_MBX860
- tristate "System flash on MBX860 board"
- depends on MTD_CFI && MBX
- help
- This enables access routines for the flash chips on the Motorola
- MBX860 board. If you have one of these boards and would like
- to use the flash chips on it, say 'Y'.
-
-config MTD_DBOX2
- tristate "CFI Flash device mapped on D-Box2"
- depends on DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
- help
- This enables access routines for the flash chips on the Nokia/Sagem
- D-Box 2 board. If you have one of these boards and would like to use
- the flash chips on it, say 'Y'.
-
config MTD_CFI_FLAGADM
tristate "CFI Flash device mapping on FlagaDM"
depends on 8xx && MTD_CFI
IXDP425 and Coyote. If you have an IXP4xx based board and
would like to use the flash chips on it, say 'Y'.
-config MTD_IXP2000
- tristate "CFI Flash device mapped on Intel IXP2000 based systems"
- depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000
- help
- This enables MTD access to flash devices on platforms based
- on Intel's IXP2000 family of network processors. If you have an
- IXP2000 based board and would like to use the flash chips on it,
- say 'Y'.
-
config MTD_AUTCPU12
bool "NV-RAM mapping AUTCPU12 board"
depends on ARCH_AUTCPU12
This enables access to the NOR Flash on the impA7 board of
implementa GmbH. If you have such a board, say 'Y' here.
-config MTD_H720X
- tristate "Hynix evaluation board mappings"
- depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
- help
- This enables access to the flash chips on the Hynix evaluation boards.
- If you have such a board, say 'Y'.
-
# This needs CFI or JEDEC, depending on the cards found.
config MTD_PCI
tristate "PCI MTD driver"
config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support"
- depends on GENERIC_GPIO || GPIOLIB
+ depends on GPIOLIB
depends on MTD_COMPLEX_MAPPINGS
help
Map driver which allows flashes to be partially physically addressed
help
Map driver to support image based filesystems for uClinux.
-config MTD_DMV182
- tristate "Map driver for Dy-4 SVME/DMV-182 board."
- depends on DMV182
- select MTD_MAP_BANK_WIDTH_32
- select MTD_CFI_I8
- select MTD_CFI_AMDSTD
- help
- Map driver for Dy-4 SVME/DMV-182 board.
-
config MTD_INTEL_VR_NOR
tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
depends on PCI
# Chip mappings
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
-obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
-obj-$(CONFIG_MTD_MBX860) += mbx860.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
-obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
-obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
obj-$(CONFIG_MTD_VMAX) += vmax301.o
obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
-obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
obj-$(CONFIG_MTD_NETtel) += nettel.o
obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
-obj-$(CONFIG_MTD_H720X) += h720x-flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
-obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
-obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
switch_back(state);
}
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
static int bfin_flash_probe(struct platform_device *pdev)
{
out:
/* Free any left over map structures */
- if (map)
- kfree(map);
+ kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
+++ /dev/null
-/*
- * D-Box 2 flash driver
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/* partition_info gives details on the logical partitions that the single
- * flash device is split into. If the size is zero we use up to the end of the
- * device. */
-static struct mtd_partition partition_info[]= {
- {
- .name = "BR bootloader",
- .size = 128 * 1024,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- },
- {
- .name = "FLFS (U-Boot)",
- .size = 128 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "Root (SquashFS)",
- .size = 7040 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "var (JFFS2)",
- .size = 896 * 1024,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = 0
- },
- {
- .name = "Flash without bootloader",
- .size = MTDPART_SIZ_FULL,
- .offset = 128 * 1024,
- .mask_flags = 0
- },
- {
- .name = "Complete Flash",
- .size = MTDPART_SIZ_FULL,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-#define WINDOW_ADDR 0x10000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-
-struct map_info dbox2_flash_map = {
- .name = "D-Box 2 flash memory",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = WINDOW_ADDR,
-};
-
-static int __init init_dbox2_flash(void)
-{
- printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
- dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!dbox2_flash_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&dbox2_flash_map);
-
- // Probe for dual Intel 28F320 or dual AMD
- mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
- if (!mymtd) {
- // Probe for single Intel 28F640
- dbox2_flash_map.bankwidth = 2;
-
- mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
- }
-
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- /* Create MTD devices for each partition. */
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
- return 0;
- }
-
- iounmap((void *)dbox2_flash_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_dbox2_flash(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (dbox2_flash_map.virt) {
- iounmap((void *)dbox2_flash_map.virt);
- dbox2_flash_map.virt = 0;
- }
-}
-
-module_init(init_dbox2_flash);
-module_exit(cleanup_dbox2_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
-MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
.copy_from = dc21285_copy_from,
};
-
/* Partition stuff */
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_dc21285(void)
{
+++ /dev/null
-/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
- * featuring the AMD Elan SC410 processor. There are two variants of this
- * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
- * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
- * flash and 16 megs of RAM.
- * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
- * and http://www.ssv-embedded.de/ssv/pc104/p170.htm
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <asm/io.h>
-
-/*
-** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
-** Destroying any of these blocks transforms the DNPC into
-** a paperweight (albeit not a very useful one, considering
-** it only weighs a few grams).
-**
-** Therefore, the BIOS blocks must never be erased or written to
-** except by people who know exactly what they are doing (e.g.
-** to install a BIOS update). These partitions are marked read-only
-** by default, but can be made read/write by undefining
-** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
-*/
-#define DNPC_BIOS_BLOCKS_WRITEPROTECTED
-
-/*
-** The ID string (in ROM) is checked to determine whether we
-** are running on a DNP/1486 or ADNP/1486
-*/
-#define BIOSID_BASE 0x000fe100
-
-#define ID_DNPC "DNP1486"
-#define ID_ADNP "ADNP1486"
-
-/*
-** Address where the flash should appear in CPU space
-*/
-#define FLASH_BASE 0x2000000
-
-/*
-** Chip Setup and Control (CSC) indexed register space
-*/
-#define CSC_INDEX 0x22
-#define CSC_DATA 0x23
-
-#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */
-#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */
-
-#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */
-
-#define CSC_CR 0xd0 /* internal I/O device disable/Echo */
- /* Z-bus/configuration register */
-
-#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */
-
-
-/*
-** PC Card indexed register space:
-*/
-
-#define PCC_INDEX 0x3e0
-#define PCC_DATA 0x3e1
-
-#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */
-#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */
-#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */
-#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */
-#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */
-#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */
-#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */
-
-
-/*
-** Access to SC4x0's Chip Setup and Control (CSC)
-** and PC Card (PCC) indexed registers:
-*/
-static inline void setcsc(int reg, unsigned char data)
-{
- outb(reg, CSC_INDEX);
- outb(data, CSC_DATA);
-}
-
-static inline unsigned char getcsc(int reg)
-{
- outb(reg, CSC_INDEX);
- return(inb(CSC_DATA));
-}
-
-static inline void setpcc(int reg, unsigned char data)
-{
- outb(reg, PCC_INDEX);
- outb(data, PCC_DATA);
-}
-
-static inline unsigned char getpcc(int reg)
-{
- outb(reg, PCC_INDEX);
- return(inb(PCC_DATA));
-}
-
-
-/*
-************************************************************
-** Enable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
-{
- unsigned long flash_end = flash_base + flash_size - 1;
-
- /*
- ** enable setup of MMS windows C-F:
- */
- /* - enable PC Card indexed register space */
- setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
- /* - set PC Card controller to operate in standard mode */
- setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);
-
- /*
- ** Program base address and end address of window
- ** where the flash ROM should appear in CPU address space
- */
- setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
- setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
- setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
- setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);
-
- /* program offset of first flash location to appear in this window (0) */
- setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
- setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);
-
- /* set attributes for MMS window C: non-cacheable, write-enabled */
- setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);
-
- /* select physical device ROMCS0 (i.e. flash) for MMS Window C */
- setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);
-
- /* enable memory window 1 */
- setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);
-
- /* now disable PC Card indexed register space again */
- setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-/*
-************************************************************
-** Disable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_unmap_flash(void)
-{
- /* - enable PC Card indexed register space */
- setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
-
- /* disable memory window 1 */
- setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);
-
- /* now disable PC Card indexed register space again */
- setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-
-/*
-************************************************************
-** Enable/Disable VPP to write to flash
-************************************************************
-*/
-
-static DEFINE_SPINLOCK(dnpc_spin);
-static int vpp_counter = 0;
-/*
-** This is what has to be done for the DNP board ..
-*/
-static void dnp_set_vpp(struct map_info *not_used, int on)
-{
- spin_lock_irq(&dnpc_spin);
-
- if (on)
- {
- if(++vpp_counter == 1)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
- }
- else
- {
- if(--vpp_counter == 0)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
- else
- BUG_ON(vpp_counter < 0);
- }
- spin_unlock_irq(&dnpc_spin);
-}
-
-/*
-** .. and this the ADNP version:
-*/
-static void adnp_set_vpp(struct map_info *not_used, int on)
-{
- spin_lock_irq(&dnpc_spin);
-
- if (on)
- {
- if(++vpp_counter == 1)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
- }
- else
- {
- if(--vpp_counter == 0)
- setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
- else
- BUG_ON(vpp_counter < 0);
- }
- spin_unlock_irq(&dnpc_spin);
-}
-
-
-
-#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */
-#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */
-#define WINDOW_ADDR FLASH_BASE
-
-static struct map_info dnpc_map = {
- .name = "ADNP Flash Bank",
- .size = ADNP_WINDOW_SIZE,
- .bankwidth = 1,
- .set_vpp = adnp_set_vpp,
- .phys = WINDOW_ADDR
-};
-
-/*
-** The layout of the flash is somewhat "strange":
-**
-** 1. 960 KiB (15 blocks) : Space for ROM Bootloader and user data
-** 2. 64 KiB (1 block) : System BIOS
-** 3. 960 KiB (15 blocks) : User Data (DNP model) or
-** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
-** 4. 64 KiB (1 block) : System BIOS Entry
-*/
-
-static struct mtd_partition partition_info[]=
-{
- {
- .name = "ADNP boot",
- .offset = 0,
- .size = 0xf0000,
- },
- {
- .name = "ADNP system BIOS",
- .offset = MTDPART_OFS_NXTBLK,
- .size = 0x10000,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
- {
- .name = "ADNP file system",
- .offset = MTDPART_OFS_NXTBLK,
- .size = 0x2f0000,
- },
- {
- .name = "ADNP system BIOS entry",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-static struct mtd_info *mymtd;
-static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
-static struct mtd_info *merged_mtd;
-
-/*
-** "Highlevel" partition info:
-**
-** Using the MTD concat layer, we can re-arrange partitions to our
-** liking: we construct a virtual MTD device by concatenating the
-** partitions, specifying the sequence such that the boot block
-** is immediately followed by the filesystem block (i.e. the stupid
-** system BIOS block is mapped to a different place). When re-partitioning
-** this concatenated MTD device, we can set the boot block size to
-** an arbitrary (though erase block aligned) value i.e. not one that
-** is dictated by the flash's physical layout. We can thus set the
-** boot block to be e.g. 64 KB (which is fully sufficient if we want
-** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
-** a large kernel image. In all cases, the remainder of the flash
-** is available as file system space.
-*/
-
-static struct mtd_partition higlvl_partition_info[]=
-{
- {
- .name = "ADNP boot block",
- .offset = 0,
- .size = CONFIG_MTD_DILNETPC_BOOTSIZE,
- },
- {
- .name = "ADNP file system space",
- .offset = MTDPART_OFS_NXTBLK,
- .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
- },
- {
- .name = "ADNP system BIOS + BIOS Entry",
- .offset = MTDPART_OFS_NXTBLK,
- .size = MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- .mask_flags = MTD_WRITEABLE,
-#endif
- },
-};
-
-#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
-
-
-static int dnp_adnp_probe(void)
-{
- char *biosid, rc = -1;
-
- biosid = (char*)ioremap(BIOSID_BASE, 16);
- if(biosid)
- {
- if(!strcmp(biosid, ID_DNPC))
- rc = 1; /* this is a DNPC */
- else if(!strcmp(biosid, ID_ADNP))
- rc = 0; /* this is a ADNPC */
- }
- iounmap((void *)biosid);
- return(rc);
-}
-
-
-static int __init init_dnpc(void)
-{
- int is_dnp;
-
- /*
- ** determine hardware (DNP/ADNP/invalid)
- */
- if((is_dnp = dnp_adnp_probe()) < 0)
- return -ENXIO;
-
- /*
- ** Things are set up for ADNP by default
- ** -> modify all that needs to be different for DNP
- */
- if(is_dnp)
- { /*
- ** Adjust window size, select correct set_vpp function.
- ** The partitioning scheme is identical on both DNP
- ** and ADNP except for the size of the third partition.
- */
- int i;
- dnpc_map.size = DNP_WINDOW_SIZE;
- dnpc_map.set_vpp = dnp_set_vpp;
- partition_info[2].size = 0xf0000;
-
- /*
- ** increment all string pointers so the leading 'A' gets skipped,
- ** thus turning all occurrences of "ADNP ..." into "DNP ..."
- */
- ++dnpc_map.name;
- for(i = 0; i < NUM_PARTITIONS; i++)
- ++partition_info[i].name;
- higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
- CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
- for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
- ++higlvl_partition_info[i].name;
- }
-
- printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
- is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys);
-
- dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
-
- dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
-
- if (!dnpc_map.virt) {
- printk("Failed to ioremap_nocache\n");
- return -EIO;
- }
- simple_map_init(&dnpc_map);
-
- printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
-
- mymtd = do_map_probe("jedec_probe", &dnpc_map);
-
- if (!mymtd)
- mymtd = do_map_probe("cfi_probe", &dnpc_map);
-
- /*
- ** If flash probes fail, try to make flashes accessible
-	** at least as ROM. Adjust erasesize in this case since
- ** the default one (128M) will break our partitioning
- */
- if (!mymtd)
- if((mymtd = do_map_probe("map_rom", &dnpc_map)))
- mymtd->erasesize = 0x10000;
-
- if (!mymtd) {
- iounmap(dnpc_map.virt);
- return -ENXIO;
- }
-
- mymtd->owner = THIS_MODULE;
-
- /*
- ** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
- ** -> add_mtd_partitions() will _not_ register MTD devices for
- ** the partitions, but will instead store pointers to the MTD
- ** objects it creates into our lowlvl_parts[] array.
- ** NOTE: we arrange the pointers such that the sequence of the
- ** partitions gets re-arranged: partition #2 follows
- ** partition #0.
- */
- partition_info[0].mtdp = &lowlvl_parts[0];
- partition_info[1].mtdp = &lowlvl_parts[2];
- partition_info[2].mtdp = &lowlvl_parts[1];
- partition_info[3].mtdp = &lowlvl_parts[3];
-
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
- /*
-	** now create a virtual MTD device by concatenating the four partitions
-	** (in the sequence given by the lowlvl_parts[] array).
- */
- merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
- if(merged_mtd)
- { /*
- ** now partition the new device the way we want it. This time,
- ** we do not supply mtd pointers in higlvl_partition_info, so
- ** add_mtd_partitions() will register the devices.
- */
- mtd_device_register(merged_mtd, higlvl_partition_info,
- NUM_HIGHLVL_PARTITIONS);
- }
-
- return 0;
-}
-
-static void __exit cleanup_dnpc(void)
-{
- if(merged_mtd) {
- mtd_device_unregister(merged_mtd);
- mtd_concat_destroy(merged_mtd);
- }
-
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (dnpc_map.virt) {
- iounmap(dnpc_map.virt);
- dnpc_unmap_flash();
- dnpc_map.virt = NULL;
- }
-}
-
-module_init(init_dnpc);
-module_exit(cleanup_dnpc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
-MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
+++ /dev/null
-
-/*
- * drivers/mtd/maps/dmv182.c
- *
- * Flash map driver for the Dy4 SVME182 board
- *
- * Copyright 2003-2004, TimeSys Corporation
- *
- * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/*
- * This driver currently handles only the 16MiB user flash bank 1 on the
- * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
- * (VxWorks boot), or the optional 48MiB expansion flash.
- *
- * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
- * now supports the first 96MiB (the boot flash bank containing FFW
- * is excluded). The VxWorks loader is in partition 1.
- */
-
-#define FLASH_BASE_ADDR 0xf0000000
-#define FLASH_BANK_SIZE (128*1024*1024)
-
-MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
-MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
-MODULE_LICENSE("GPL");
-
-static struct map_info svme182_map = {
- .name = "Dy4 SVME182",
- .bankwidth = 32,
- .size = 128 * 1024 * 1024
-};
-
-#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE)
-
-// Allow 6MiB for the kernel
-#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024)
-// Allow 1MiB for the bootloader
-#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
-// Use the remaining 9MiB at the end of flash for the RFS
-#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
- NEW_BOOTIMAGE_PART_SIZE)
-
-static struct mtd_partition svme182_partitions[] = {
- // The Lower PABS is only 128KiB, but the partition code doesn't
- // like partitions that don't end on the largest erase block
- // size of the device, even if all of the erase blocks in the
- // partition are small ones. The hardware should prevent
- // writes to the actual PABS areas.
- {
- name: "Lower PABS and CPU 0 bootloader or kernel",
- size: 6*1024*1024,
- offset: 0,
- },
- {
- name: "Root Filesystem",
- size: 10*1024*1024,
- offset: MTDPART_OFS_NXTBLK
- },
- {
- name: "CPU1 Bootloader",
- size: 1024*1024,
- offset: MTDPART_OFS_NXTBLK,
- },
- {
- name: "Extra",
- size: 110*1024*1024,
- offset: MTDPART_OFS_NXTBLK
- },
- {
- name: "Foundation Firmware and Upper PABS",
- size: 1024*1024,
- offset: MTDPART_OFS_NXTBLK,
- mask_flags: MTD_WRITEABLE // read-only
- }
-};
-
-static struct mtd_info *this_mtd;
-
-static int __init init_svme182(void)
-{
- struct mtd_partition *partitions;
- int num_parts = ARRAY_SIZE(svme182_partitions);
-
- partitions = svme182_partitions;
-
- svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size);
-
- if (svme182_map.virt == 0) {
- printk("Failed to ioremap FLASH memory area.\n");
- return -EIO;
- }
-
- simple_map_init(&svme182_map);
-
- this_mtd = do_map_probe("cfi_probe", &svme182_map);
- if (!this_mtd)
- {
- iounmap((void *)svme182_map.virt);
- return -ENXIO;
- }
-
- printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
- this_mtd->size >> 20, FLASH_BASE_ADDR);
-
- this_mtd->owner = THIS_MODULE;
- mtd_device_register(this_mtd, partitions, num_parts);
-
- return 0;
-}
-
-static void __exit cleanup_svme182(void)
-{
- if (this_mtd)
- {
- mtd_device_unregister(this_mtd);
- map_destroy(this_mtd);
- }
-
- if (svme182_map.virt)
- {
- iounmap((void *)svme182_map.virt);
- svme182_map.virt = 0;
- }
-
- return;
-}
-
-module_init(init_svme182);
-module_exit(cleanup_svme182);
memcpy_toio(map->virt + (to % state->win_size), from, len);
}
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", NULL };
/**
* gpio_flash_probe() - setup a mapping for a GPIO assisted flash
+++ /dev/null
-/*
- * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
- * evaluation boards
- *
- * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
- * 2003 Thomas Gleixner <tglx@linutronix.de>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-static struct mtd_info *mymtd;
-
-static struct map_info h720x_map = {
- .name = "H720X",
- .bankwidth = 4,
- .size = H720X_FLASH_SIZE,
- .phys = H720X_FLASH_PHYS,
-};
-
-static struct mtd_partition h720x_partitions[] = {
- {
- .name = "ArMon",
- .size = 0x00080000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Env",
- .size = 0x00040000,
- .offset = 0x00080000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Kernel",
- .size = 0x00180000,
- .offset = 0x000c0000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "Ramdisk",
- .size = 0x00400000,
- .offset = 0x00240000,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "jffs2",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND
- }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
-
-/*
- * Initialize FLASH support
- */
-static int __init h720x_mtd_init(void)
-{
- h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
-
- if (!h720x_map.virt) {
- printk(KERN_ERR "H720x-MTD: ioremap failed\n");
- return -EIO;
- }
-
- simple_map_init(&h720x_map);
-
- // Probe for flash bankwidth 4
- printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
- mymtd = do_map_probe("cfi_probe", &h720x_map);
- if (!mymtd) {
- printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
- // Probe for bankwidth 2
- h720x_map.bankwidth = 2;
- mymtd = do_map_probe("cfi_probe", &h720x_map);
- }
-
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
-
- mtd_device_parse_register(mymtd, NULL, NULL,
- h720x_partitions, NUM_PARTITIONS);
- return 0;
- }
-
- iounmap((void *)h720x_map.virt);
- return -ENXIO;
-}
-
-/*
- * Cleanup
- */
-static void __exit h720x_mtd_cleanup(void)
-{
-
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
-
- if (h720x_map.virt) {
- iounmap((void *)h720x_map.virt);
- h720x_map.virt = 0;
- }
-}
-
-
-module_init(h720x_mtd_init);
-module_exit(h720x_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
#define NUM_FLASHBANKS 2
#define BUSWIDTH 4
-/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
-#define PROBETYPES { "jedec_probe", NULL }
-
#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
#define MTDID "impa7-%d" /* for mtdparts= partitioning */
static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+static const char * const rom_probe_types[] = { "jedec_probe", NULL };
static struct map_info impa7_map[NUM_FLASHBANKS] = {
{
static int __init init_impa7(void)
{
- static const char *rom_probe_types[] = PROBETYPES;
- const char **type;
+ const char * const *type;
int i;
static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
{ WINDOW_ADDR0, WINDOW_SIZE0 },
static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
- static const char *probe_types[] =
+ static const char * const probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
- const char **type;
+ const char * const *type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
+++ /dev/null
-/*
- * drivers/mtd/maps/ixp2000.c
- *
- * Mapping for the Intel XScale IXP2000 based systems
- *
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2003-2004 MontaVista Software, Inc.
- *
- * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com>
- * Maintainer: Deepak Saxena <dsaxena@plexity.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach/flash.h>
-
-#include <linux/reboot.h>
-
-struct ixp2000_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
-};
-
-static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
-{
- unsigned long (*set_bank)(unsigned long) =
- (unsigned long(*)(unsigned long))map->map_priv_2;
-
- return (set_bank ? set_bank(ofs) : ofs);
-}
-
-#ifdef __ARMEB__
-/*
- * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which
- * causes the lower address bits to be XORed with 0x11 on 8 bit accesses
- * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44.
- */
-static int erratum44_workaround = 0;
-
-static inline unsigned long address_fix8_write(unsigned long addr)
-{
- if (erratum44_workaround) {
- return (addr ^ 3);
- }
- return addr;
-}
-#else
-
-#define address_fix8_write(x) (x)
-#endif
-
-static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs)
-{
- map_word val;
-
- val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs)));
- return val;
-}
-
-/*
- * We can't use the standard memcpy due to the broken SlowPort
- * address translation on rev A0 and A1 silicon and the fact that
- * we have banked flash.
- */
-static void ixp2000_flash_copy_from(struct map_info *map, void *to,
- unsigned long from, ssize_t len)
-{
- from = flash_bank_setup(map, from);
- while(len--)
- *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++);
-}
-
-static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs)
-{
- *(__u8 *) (address_fix8_write(map->map_priv_1 +
- flash_bank_setup(map, ofs))) = d.x[0];
-}
-
-static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to,
- const void *from, ssize_t len)
-{
- to = flash_bank_setup(map, to);
- while(len--) {
- unsigned long tmp = address_fix8_write(map->map_priv_1 + to++);
- *(__u8 *)(tmp) = *(__u8 *)(from++);
- }
-}
-
-
-static int ixp2000_flash_remove(struct platform_device *dev)
-{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct ixp2000_flash_info *info = platform_get_drvdata(dev);
-
- platform_set_drvdata(dev, NULL);
-
- if(!info)
- return 0;
-
- if (info->mtd) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
- if (info->map.map_priv_1)
- iounmap((void *) info->map.map_priv_1);
-
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
- }
-
- if (plat->exit)
- plat->exit();
-
- return 0;
-}
-
-
-static int ixp2000_flash_probe(struct platform_device *dev)
-{
- static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
- struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
- struct flash_platform_data *plat;
- struct ixp2000_flash_info *info;
- unsigned long window_size;
- int err = -1;
-
- if (!ixp_data)
- return -ENODEV;
-
- plat = ixp_data->platform_data;
- if (!plat)
- return -ENODEV;
-
- window_size = resource_size(dev->resource);
- dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
- ixp_data->nr_banks, ((u32)window_size >> 20));
-
- if (plat->width != 1) {
- dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n",
- plat->width * 8);
- return -EIO;
- }
-
- info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
- if(!info) {
- err = -ENOMEM;
- goto Error;
- }
-
- platform_set_drvdata(dev, info);
-
- /*
- * Tell the MTD layer we're not 1:1 mapped so that it does
- * not attempt to do a direct access on us.
- */
- info->map.phys = NO_XIP;
-
- info->map.size = ixp_data->nr_banks * window_size;
- info->map.bankwidth = 1;
-
- /*
- * map_priv_2 is used to store a ptr to the bank_setup routine
- */
- info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
-
- info->map.name = dev_name(&dev->dev);
- info->map.read = ixp2000_flash_read8;
- info->map.write = ixp2000_flash_write8;
- info->map.copy_from = ixp2000_flash_copy_from;
- info->map.copy_to = ixp2000_flash_copy_to;
-
- info->res = request_mem_region(dev->resource->start,
- resource_size(dev->resource),
- dev_name(&dev->dev));
- if (!info->res) {
- dev_err(&dev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto Error;
- }
-
- info->map.map_priv_1 =
- (unsigned long)ioremap(dev->resource->start,
- resource_size(dev->resource));
- if (!info->map.map_priv_1) {
- dev_err(&dev->dev, "Failed to ioremap flash region\n");
- err = -EIO;
- goto Error;
- }
-
-#if defined(__ARMEB__)
- /*
- * Enable erratum 44 workaround for NPUs with broken slowport
- */
-
- erratum44_workaround = ixp2000_has_broken_slowport();
- dev_info(&dev->dev, "Erratum 44 workaround %s\n",
- erratum44_workaround ? "enabled" : "disabled");
-#endif
-
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto Error;
- }
- info->mtd->owner = THIS_MODULE;
-
- err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
- if (err)
- goto Error;
-
- return 0;
-
-Error:
- ixp2000_flash_remove(dev);
- return err;
-}
-
-static struct platform_driver ixp2000_flash_driver = {
- .probe = ixp2000_flash_probe,
- .remove = ixp2000_flash_remove,
- .driver = {
- .name = "IXP2000-Flash",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(ixp2000_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_ALIAS("platform:IXP2000-Flash");
struct resource *res;
};
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int ixp4xx_flash_remove(struct platform_device *dev)
{
};
static const char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] = {
- "cmdlinepart", "ofpart", NULL };
+static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
+++ /dev/null
-/*
- * Handle mapping of the flash on MBX860 boards
- *
- * Author: Anton Todorov
- * Copyright: (C) 2001 Emness Technology
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x00200000
-
-/* Flash / Partition sizing */
-#define MAX_SIZE_KiB 8192
-#define BOOT_PARTITION_SIZE_KiB 512
-#define KERNEL_PARTITION_SIZE_KiB 5632
-#define APP_PARTITION_SIZE_KiB 2048
-
-#define NUM_PARTITIONS 3
-
-/* partition_info gives details on the logical partitions that the single
- * flash device is split into. If the size is zero we use up to the end
- * of the device. */
-static struct mtd_partition partition_info[]={
- { .name = "MBX flash BOOT partition",
- .offset = 0,
- .size = BOOT_PARTITION_SIZE_KiB*1024 },
- { .name = "MBX flash DATA partition",
- .offset = BOOT_PARTITION_SIZE_KiB*1024,
- .size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
- { .name = "MBX flash APPLICATION partition",
- .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
-};
-
-
-static struct mtd_info *mymtd;
-
-struct map_info mbx_map = {
- .name = "MBX flash",
- .size = WINDOW_SIZE,
- .phys = WINDOW_ADDR,
- .bankwidth = 4,
-};
-
-static int __init init_mbx(void)
-{
- printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
- if (!mbx_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&mbx_map);
-
- mymtd = do_map_probe("jedec_probe", &mbx_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- mtd_device_register(mymtd, NULL, 0);
- mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
- return 0;
- }
-
- iounmap((void *)mbx_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_mbx(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (mbx_map.virt) {
- iounmap((void *)mbx_map.virt);
- mbx_map.virt = 0;
- }
-}
-
-module_init(init_mbx);
-module_exit(cleanup_mbx);
-
-MODULE_AUTHOR("Anton Todorov <a.todorov@emness.com>");
-MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board");
-MODULE_LICENSE("GPL");
if (err)
goto release;
- /* tsk - do_map_probe should take const char * */
- mtd = do_map_probe((char *)info->map_name, &map->map);
+ mtd = do_map_probe(info->map_name, &map->map);
err = -ENODEV;
if (!mtd)
goto release;
spin_unlock_irqrestore(&info->vpp_lock, flags);
}
-static const char *rom_probe_types[] = {
- "cfi_probe",
- "jedec_probe",
- "qinfo_probe",
- "map_rom",
- NULL };
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
- NULL };
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL };
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", "afs", NULL };
static int physmap_flash_probe(struct platform_device *dev)
{
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
- const char **probe_type;
- const char **part_types;
+ const char * const *probe_type;
+ const char * const *part_types;
int err = 0;
int i;
int devices_found = 0;
return 0;
}
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom" };
+
/* Helper function to handle probing of the obsolete "direct-mapped"
* compatible binding, which has an extra "probe-type" property
* describing the type of flash probe necessary. */
struct device_node *dp = dev->dev.of_node;
const char *of_probe;
struct mtd_info *mtd;
- static const char *rom_probe_types[]
- = { "cfi_probe", "jedec_probe", "map_rom"};
int i;
dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" "
specifies the list of partition probers to use. If none is given then the
default is used. These take precedence over other device tree
information. */
-static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
- "ofpart", "ofoldpart", NULL };
-static const char **of_get_probes(struct device_node *dp)
+static const char * const part_probe_types_def[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
+
+static const char * const *of_get_probes(struct device_node *dp)
{
const char *cp;
int cplen;
return res;
}
-static void of_free_probes(const char **probes)
+static void of_free_probes(const char * const *probes)
{
if (probes != part_probe_types_def)
kfree(probes);
static struct of_device_id of_flash_match[];
static int of_flash_probe(struct platform_device *dev)
{
- const char **part_probe_types;
+ const char * const *part_probe_types;
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
* supplied by the platform_data struct */
if (pdata->map_probes) {
- const char **map_probes = pdata->map_probes;
+ const char * const *map_probes = pdata->map_probes;
for ( ; !info->mtd && *map_probes; map_probes++)
info->mtd = do_map_probe(*map_probes , &info->map);
struct map_info map;
};
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
static int pxa2xx_flash_probe(struct platform_device *pdev)
{
return 0;
}
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", NULL };
static int rbtx4939_flash_probe(struct platform_device *dev)
{
struct rbtx4939_flash_data *pdata;
struct rbtx4939_flash_info *info;
struct resource *res;
- const char **probe_type;
+ const char * const *probe_type;
int err = 0;
unsigned long size;
+++ /dev/null
-/*
- * Handle mapping of the flash on the RPX Lite and CLLF boards
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-static struct map_info rpxlite_map = {
- .name = "RPX",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = WINDOW_ADDR,
-};
-
-static int __init init_rpxlite(void)
-{
- printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
- if (!rpxlite_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&rpxlite_map);
- mymtd = do_map_probe("cfi_probe", &rpxlite_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- mtd_device_register(mymtd, NULL, 0);
- return 0;
- }
-
- iounmap((void *)rpxlite_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rpxlite(void)
-{
- if (mymtd) {
- mtd_device_unregister(mymtd);
- map_destroy(mymtd);
- }
- if (rpxlite_map.virt) {
- iounmap((void *)rpxlite_map.virt);
- rpxlite_map.virt = 0;
- }
-}
-
-module_init(init_rpxlite);
-module_exit(cleanup_rpxlite);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
-MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
return ERR_PTR(ret);
}
-static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int sa1100_mtd_probe(struct platform_device *pdev)
{
.bankwidth = 4,
};
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
#ifdef CONFIG_MTD_SUPERH_RESERVE
static struct mtd_partition superh_se_partitions[] = {
+++ /dev/null
-/*
- * Handle mapping of the flash memory access routines
- * on TQM8xxL based devices.
- *
- * based on rpxlite.c
- *
- * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
- *
- * This code is GPLed
- *
- */
-
-/*
- * According to the TQM8xxL hardware manual, the TQM8xxL series has the
- * following flash memory organisations:
- * | capacity | | chip type | | bank0 | | bank1 |
- * 2MiB 512Kx16 2MiB 0
- * 4MiB 1Mx16 4MiB 0
- * 8MiB 1Mx16 4MiB 4MiB
- * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
- * kernel configuration.
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define FLASH_ADDR 0x40000000
-#define FLASH_SIZE 0x00800000
-#define FLASH_BANK_MAX 4
-
-// trivial struct to describe partition information
-struct mtd_part_def
-{
- int nums;
- unsigned char *type;
- struct mtd_partition* mtd_part;
-};
-
-//static struct mtd_info *mymtd;
-static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
-static struct map_info* map_banks[FLASH_BANK_MAX];
-static struct mtd_part_def part_banks[FLASH_BANK_MAX];
-static unsigned long num_banks;
-static void __iomem *start_scan_addr;
-
-/*
- * Here are partition information for all known TQM8xxL series devices.
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must correspond to the
- * value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
- */
-
-/* Currently, TQM8xxL has up to 8MiB flash */
-static unsigned long tqm8xxl_max_flash_size = 0x00800000;
-
-/* partition definition for first flash bank
- * (cf. "drivers/char/flash_config.c")
- */
-static struct mtd_partition tqm8xxl_partitions[] = {
- {
- .name = "ppcboot",
- .offset = 0x00000000,
- .size = 0x00020000, /* 128KB */
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "kernel", /* default kernel image */
- .offset = 0x00020000,
- .size = 0x000e0000,
- .mask_flags = MTD_WRITEABLE, /* force read-only */
- },
- {
- .name = "user",
- .offset = 0x00100000,
- .size = 0x00100000,
- },
- {
- .name = "initrd",
- .offset = 0x00200000,
- .size = 0x00200000,
- }
-};
-/* partition definition for second flash bank */
-static struct mtd_partition tqm8xxl_fs_partitions[] = {
- {
- .name = "cramfs",
- .offset = 0x00000000,
- .size = 0x00200000,
- },
- {
- .name = "jffs",
- .offset = 0x00200000,
- .size = 0x00200000,
- //.size = MTDPART_SIZ_FULL,
- }
-};
-
-static int __init init_tqm_mtd(void)
-{
- int idx = 0, ret = 0;
- unsigned long flash_addr, flash_size, mtd_size = 0;
- /* pointer to TQM8xxL board info data */
- bd_t *bd = (bd_t *)__res;
-
- flash_addr = bd->bi_flashstart;
- flash_size = bd->bi_flashsize;
-
- //request maximum flash size address space
- start_scan_addr = ioremap(flash_addr, flash_size);
- if (!start_scan_addr) {
- printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr);
- return -EIO;
- }
-
- for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
- if(mtd_size >= flash_size)
- break;
-
- printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx);
-
- map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- if(map_banks[idx] == NULL) {
- ret = -ENOMEM;
- /* FIXME: What if some MTD devices were probed already? */
- goto error_mem;
- }
-
- map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
-
- if (!map_banks[idx]->name) {
- ret = -ENOMEM;
- /* FIXME: What if some MTD devices were probed already? */
- goto error_mem;
- }
- sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
-
- map_banks[idx]->size = flash_size;
- map_banks[idx]->bankwidth = 4;
-
- simple_map_init(map_banks[idx]);
-
- map_banks[idx]->virt = start_scan_addr;
- map_banks[idx]->phys = flash_addr;
- /* FIXME: This looks utterly bogus, but I'm trying to
- preserve the behaviour of the original (shown here)...
-
- map_banks[idx]->map_priv_1 =
- start_scan_addr + ((idx > 0) ?
- (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
- */
-
- if (idx && mtd_banks[idx-1]) {
- map_banks[idx]->virt += mtd_banks[idx-1]->size;
- map_banks[idx]->phys += mtd_banks[idx-1]->size;
- }
-
- //start to probe flash chips
- mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
-
- if (mtd_banks[idx]) {
- mtd_banks[idx]->owner = THIS_MODULE;
- mtd_size += mtd_banks[idx]->size;
- num_banks++;
-
- printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks,
- mtd_banks[idx]->name, mtd_banks[idx]->size);
- }
- }
-
- /* no supported flash chips found */
- if (!num_banks) {
-		printk(KERN_NOTICE "TQM8xxL: No supported flash chips found!\n");
- ret = -ENXIO;
- goto error_mem;
- }
-
- /*
- * Select Static partition definitions
- */
- part_banks[0].mtd_part = tqm8xxl_partitions;
- part_banks[0].type = "Static image";
- part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions);
-
- part_banks[1].mtd_part = tqm8xxl_fs_partitions;
- part_banks[1].type = "Static file system";
- part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
-
- for(idx = 0; idx < num_banks ; idx++) {
- if (part_banks[idx].nums == 0)
- printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
- else
- printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
- idx, part_banks[idx].type);
- mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
- part_banks[idx].nums);
- }
- return 0;
-error_mem:
- for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
- if(map_banks[idx] != NULL) {
- kfree(map_banks[idx]->name);
- map_banks[idx]->name = NULL;
- kfree(map_banks[idx]);
- map_banks[idx] = NULL;
- }
- }
-error:
- iounmap(start_scan_addr);
- return ret;
-}
-
-static void __exit cleanup_tqm_mtd(void)
-{
- unsigned int idx = 0;
- for(idx = 0 ; idx < num_banks ; idx++) {
- /* destroy mtd_info previously allocated */
- if (mtd_banks[idx]) {
- mtd_device_unregister(mtd_banks[idx]);
- map_destroy(mtd_banks[idx]);
- }
- /* release map_info not used anymore */
- kfree(map_banks[idx]->name);
- kfree(map_banks[idx]);
- }
-
- if (start_scan_addr) {
- iounmap(start_scan_addr);
- start_scan_addr = 0;
- }
-}
-
-module_init(init_tqm_mtd);
-module_exit(cleanup_tqm_mtd);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kirk Lee <kirk@hpc.ee.ntu.edu.tw>");
-MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards");
tsunami_flash_mtd = 0;
}
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "map_rom", NULL };
static int __init init_tsunami_flash(void)
{
- static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
- char **type;
+ const char * const *type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
#include <asm/uaccess.h>
+#include "mtdcore.h"
+
static DEFINE_MUTEX(mtd_mutex);
/*
wake_up((wait_queue_head_t *)instr->priv);
}
-#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
size_t retlen;
- int ret = 0;
-
- /*
- * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
- * operations are supported.
- */
- if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
- return -EOPNOTSUPP;
switch (mode) {
case MTD_OTP_FACTORY:
+ if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
+ if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+ -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
- default:
- ret = -EINVAL;
case MTD_OTP_OFF:
+ mfi->mode = MTD_FILE_MODE_NORMAL;
break;
+ default:
+ return -EINVAL;
}
- return ret;
+
+ return 0;
}
-#else
-# define otp_select_filemode(f,m) -EOPNOTSUPP
-#endif
static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
break;
}
-#ifdef CONFIG_HAVE_MTD_OTP
case OTPSELECT:
{
int mode;
ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
-#endif
/* This ioctl is being deprecated - it truncates the ECC layout */
case ECCGETLAYOUT:
};
MODULE_ALIAS_FS("mtd_inodefs");
-static int __init init_mtdchar(void)
+int __init init_mtdchar(void)
{
int ret;
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
"mtd", &mtd_fops);
if (ret < 0) {
- pr_notice("Can't allocate major number %d for "
- "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+ pr_err("Can't allocate major number %d for MTD\n",
+ MTD_CHAR_MAJOR);
return ret;
}
ret = register_filesystem(&mtd_inodefs_type);
if (ret) {
- pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+ pr_err("Can't register mtd_inodefs filesystem, error %d\n",
+ ret);
goto err_unregister_chdev;
}
+
return ret;
err_unregister_chdev:
return ret;
}
-static void __exit cleanup_mtdchar(void)
+void __exit cleanup_mtdchar(void)
{
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
-module_init(init_mtdchar);
-module_exit(cleanup_mtdchar);
-
-MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
#include <linux/mtd/partitions.h>
#include "mtdcore.h"
+
/*
* backing device capabilities for non-mappable devices (such as NAND flash)
* - permits private mappings, copies are taken of the data
static LIST_HEAD(mtd_notifiers);
-#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
-#else
-#define MTD_DEVT(index) 0
-#endif
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
*
* Returns zero in case of success and a negative error code in case of failure.
*/
-int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
struct mtd_part_parser_data *parser_data,
const struct mtd_partition *parts,
int nr_parts)
/*====================================================================*/
/* Support for /proc/mtd */
-static struct proc_dir_entry *proc_mtd;
-
static int mtd_proc_show(struct seq_file *m, void *v)
{
struct mtd_info *mtd;
return ret;
}
+static struct proc_dir_entry *proc_mtd;
+
static int __init init_mtd(void)
{
int ret;
if (ret)
goto err_bdi3;
-#ifdef CONFIG_PROC_FS
proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
-#endif /* CONFIG_PROC_FS */
+
+ ret = init_mtdchar();
+ if (ret)
+ goto out_procfs;
+
return 0;
+out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
err_bdi3:
bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PROC_FS
+ cleanup_mtdchar();
if (proc_mtd)
- remove_proc_entry( "mtd", NULL);
-#endif /* CONFIG_PROC_FS */
+ remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
bdi_destroy(&mtd_bdi_unmappable);
bdi_destroy(&mtd_bdi_ro_mappable);
-/* linux/drivers/mtd/mtdcore.h
- *
- * Header file for driver private mtdcore exports
- *
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
*/
-/* These are exported solely for the purpose of mtd_blkdevs.c. You
- should not use them for _anything_ else */
-
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *__mtd_next_device(int i);
-extern int add_mtd_device(struct mtd_info *mtd);
-extern int del_mtd_device(struct mtd_info *mtd);
-extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
- int);
-extern int del_mtd_partitions(struct mtd_info *);
-extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
- struct mtd_partition **pparts,
- struct mtd_part_parser_data *data);
+struct mtd_info *__mtd_next_device(int i);
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
#define mtd_for_each_device(mtd) \
for ((mtd) = __mtd_next_device(0); \
* Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
* are changing this array!
*/
-static const char *default_mtd_part_types[] = {
+static const char * const default_mtd_part_types[] = {
"cmdlinepart",
"ofpart",
NULL
* o a positive number of found partitions, in which case on exit @pparts will
* point to an array containing this number of &struct mtd_info objects.
*/
-int parse_mtd_partitions(struct mtd_info *master, const char **types,
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
tristate
default n
-config MTD_NAND_MUSEUM_IDS
- bool "Enable chip ids for obsolete ancient NAND devices"
- default n
- help
- Enable this option only when your board has first generation
- NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
- of these chips were reused by later, larger chips.
-
config MTD_NAND_DENALI
tristate "Support Denali NAND controller"
help
scratch register here to enable this feature. On Intel Moorestown
boards, the scratch register is at 0xFF108018.
-config MTD_NAND_H1900
- tristate "iPAQ H1900 flash"
- depends on ARCH_PXA && BROKEN
- help
- This enables the driver for the iPAQ h1900 flash.
-
config MTD_NAND_GPIO
tristate "GPIO NAND Flash driver"
- depends on GENERIC_GPIO && ARM
+ depends on GPIOLIB && ARM
help
This enables a GPIO based NAND flash driver.
If unsure, say N.
-config MTD_NAND_RTC_FROM4
- tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
- depends on SH_SOLUTION_ENGINE
- select REED_SOLOMON
- select REED_SOLOMON_DEC8
- select BITREVERSE
- help
- This enables the driver for the Renesas Technology AG-AND
- flash interface board (FROM_BOARD4)
-
-config MTD_NAND_PPCHAMELEONEVB
- tristate "NAND Flash device on PPChameleonEVB board"
- depends on PPCHAMELEONEVB && BROKEN
- help
- This enables the NAND flash driver on the PPChameleon EVB Board.
-
config MTD_NAND_S3C2410
tristate "NAND Flash support for Samsung S3C SoCs"
depends on ARCH_S3C24XX || ARCH_S3C64XX
obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
-obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
-obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
-obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
},
};
-static int __init atmel_nand_init(void)
-{
- return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
-}
-
-
-static void __exit atmel_nand_exit(void)
-{
- platform_driver_unregister(&atmel_nand_driver);
-}
-
-
-module_init(atmel_nand_init);
-module_exit(atmel_nand_exit);
+module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
},
};
-static int __init bf5xx_nand_init(void)
-{
- printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n",
- DRV_DESC, DRV_VERSION);
-
- return platform_driver_register(&bf5xx_nand_driver);
-}
-
-static void __exit bf5xx_nand_exit(void)
-{
- platform_driver_unregister(&bf5xx_nand_driver);
-}
-
-module_init(bf5xx_nand_init);
-module_exit(bf5xx_nand_exit);
+module_platform_driver(bf5xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRV_AUTHOR);
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
case NAND_CMD_RNDOUT:
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
return;
}
}
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
int status;
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
return pdev->dev.platform_data;
}
#else
-#define davinci_nand_of_match NULL
static struct davinci_nand_pdata
*nand_davinci_get_pdata(struct platform_device *pdev)
{
.driver = {
.name = "davinci_nand",
.owner = THIS_MODULE,
- .of_match_table = davinci_nand_of_match,
+ .of_match_table = of_match_ptr(davinci_nand_of_match),
},
};
MODULE_ALIAS("platform:davinci_nand");
-static int __init nand_davinci_init(void)
-{
- return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe);
-}
-module_init(nand_davinci_init);
-
-static void __exit nand_davinci_exit(void)
-{
- platform_driver_unregister(&nand_davinci_driver);
-}
-module_exit(nand_davinci_exit);
+module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
}
ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!res)
+ if (!ptr)
dev_err(dev, "ioremap_nocache of %s failed!", res->name);
return ptr;
denali->irq = platform_get_irq(ofdev, 0);
if (denali->irq < 0) {
dev_err(&ofdev->dev, "no irq defined\n");
- return -ENXIO;
+ return denali->irq;
}
denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
.driver = {
.name = "denali-nand-dt",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(denali_nand_dt_ids),
+ .of_match_table = denali_nand_dt_ids,
},
};
-static int __init denali_init_dt(void)
-{
- return platform_driver_register(&denali_dt_driver);
-}
-module_init(denali_init_dt);
-
-static void __exit denali_exit_dt(void)
-{
- platform_driver_unregister(&denali_dt_driver);
-}
-module_exit(denali_exit_dt);
+module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
.remove = __exit_p(cleanup_docg4),
};
-static int __init docg4_init(void)
-{
- return platform_driver_probe(&docg4_driver, probe_docg4);
-}
-
-static void __exit docg4_exit(void)
-{
- platform_driver_unregister(&docg4_driver);
-}
-
-module_init(docg4_init);
-module_exit(docg4_exit);
+module_platform_driver_probe(docg4_driver, probe_docg4);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Dunn");
},
};
-static int __init fsmc_nand_init(void)
-{
- return platform_driver_probe(&fsmc_nand_driver,
- fsmc_nand_probe);
-}
-module_init(fsmc_nand_init);
-
-static void __exit fsmc_nand_exit(void)
-{
- platform_driver_unregister(&fsmc_nand_driver);
-}
-module_exit(fsmc_nand_exit);
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
return r;
}
#else /* CONFIG_OF */
-#define gpio_nand_id_table NULL
static inline int gpio_nand_get_config_of(const struct device *dev,
struct gpio_nand_platdata *plat)
{
if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
gpio_free(gpiomtd->plat.gpio_rdy);
- kfree(gpiomtd);
-
return 0;
}
if (!res0)
return -EINVAL;
- gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL);
if (gpiomtd == NULL) {
dev_err(&dev->dev, "failed to create NAND MTD\n");
return -ENOMEM;
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
release_mem_region(res0->start, resource_size(res0));
err_map:
- kfree(gpiomtd);
return ret;
}
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
- .of_match_table = gpio_nand_id_table,
+ .of_match_table = of_match_ptr(gpio_nand_id_table),
},
};
+++ /dev/null
-/*
- * drivers/mtd/nand/h1910.c
- *
- * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
- * a 128Mibit (16MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/sizes.h>
-#include <mach/h1900-gpio.h>
-#include <mach/ipaq.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *h1910_nand_mtd = NULL;
-
-/*
- * Module stuff
- */
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
- {name:"h1910 NAND Flash",
- offset:0,
- size:16 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- *
- * NAND_NCE: bit 0 - don't care
- * NAND_CLE: bit 1 - address bit 2
- * NAND_ALE: bit 2 - address bit 3
- */
-static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
-}
-
-/*
- * read device ready pin
- */
-#if 0
-static int h1910_device_ready(struct mtd_info *mtd)
-{
- return (GPLR(55) & GPIO_bit(55));
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init h1910_init(void)
-{
- struct nand_chip *this;
- void __iomem *nandaddr;
-
- if (!machine_is_h1900())
- return -ENODEV;
-
- nandaddr = ioremap(0x08000000, 0x1000);
- if (!nandaddr) {
- printk("Failed to ioremap nand flash.\n");
- return -ENOMEM;
- }
-
- /* Allocate memory for MTD device structure and private data */
- h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!h1910_nand_mtd) {
- printk("Unable to allocate h1910 NAND MTD device structure.\n");
- iounmap((void *)nandaddr);
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&h1910_nand_mtd[1]);
-
- /* Initialize structures */
- memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- h1910_nand_mtd->priv = this;
- h1910_nand_mtd->owner = THIS_MODULE;
-
- /*
- * Enable VPEN
- */
- GPSR(37) = GPIO_bit(37);
-
- /* insert callbacks */
- this->IO_ADDR_R = nandaddr;
- this->IO_ADDR_W = nandaddr;
- this->cmd_ctrl = h1910_hwcontrol;
- this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */
- /* 15 us command delay time */
- this->chip_delay = 50;
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device */
- if (nand_scan(h1910_nand_mtd, 1)) {
- printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
- kfree(h1910_nand_mtd);
- iounmap((void *)nandaddr);
- return -ENXIO;
- }
-
- /* Register the partitions */
- mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
- NUM_PARTITIONS);
-
- /* Return happy */
- return 0;
-}
-
-module_init(h1910_init);
-
-/*
- * Clean up routine
- */
-static void __exit h1910_cleanup(void)
-{
- struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];
-
- /* Release resources, unregister device */
- nand_release(h1910_nand_mtd);
-
- /* Release io resource */
- iounmap((void *)this->IO_ADDR_W);
-
- /* Free the MTD device structure */
- kfree(h1910_nand_mtd);
-}
-
-module_exit(h1910_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
-MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
}
static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
int res;
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
- * Basic support for AG-AND chips is provided.
*
* Additional technical information is available on
* http://www.linux-mtd.infradead.org/doc/nand.html
* Enable cached programming for 2k page size chips
* Check, if mtd->ecctype should be set to MTD_ECC_HW
* if we have HW ECC support.
- * The AG-AND chips have nice features for speed improvement,
- * which are not supported yet. Read / program 4 pages in one go.
* BBT table is not serialized, has to be fixed
*
* This program is free software; you can redistribute it and/or modify
* @page_addr: the page address for this command, -1 if none
*
* Send command to NAND device. This function is used for small page devices
- * (256/512 Bytes per page).
+ * (512 Bytes per page).
*/
static void nand_command(struct mtd_info *mtd, unsigned int command,
int column, int page_addr)
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command & 0xff,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- /* Read error status commands require only a short delay */
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
*/
ndelay(100);
- if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
- chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
- else
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
}
/**
- * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
* @mtd: mtd info structure
* @chip: nand chip info structure
* @data_offs: offset of requested data within the page
return 0;
}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @data_buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required)
+{
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, i;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ chip->write_buf(mtd, data_buf, ecc_size);
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(mtd, data_buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ data_buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+ /* copy calculated ECC for whole page to chip->buffer->oob */
+ /* this includes masked values (0xFF) for unwritten subpages */
+ ecc_calc = chip->buffers->ecccalc;
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+
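The new nand_write_subpage_hwecc() above is wired up later in this patch as the default ecc.write_subpage handler; nand_write_page() (changed below) then routes any write that does not cover a whole page through it, provided the chip does not set NAND_NO_SUBPAGE_WRITE and an ecc.write_subpage handler is available. Partial-page writes thus program only the affected subpages, with the ECC and OOB of untouched subpages masked to 0xFF. From a caller's point of view the API is unchanged; a hypothetical example (names and sizes illustrative only):

	/*
	 * Write 512 bytes at an offset of 1 KiB inside a 2 KiB page.
	 * With subpage write support this goes through
	 * chip->ecc.write_subpage() instead of a full-page ECC write.
	 */
	err = mtd_write(mtd, page_addr + SZ_1K, 512, &retlen, buf);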
/**
* nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
* @mtd: mtd info structure
* nand_write_page - [REPLACEABLE] write one page
* @mtd: MTD device structure
* @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
* @buf: the data to write
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw)
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw)
{
- int status;
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required);
+ else if (subpage)
+ status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+ buf, oob_required);
else
status = chip->ecc.write_page(mtd, chip, buf, oob_required);
*/
cached = 0;
- if (!cached || !(chip->options & NAND_CACHEPRG)) {
+ if (!cached || !NAND_HAS_CACHEPROG(chip)) {
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
- int ret, subpage;
+ int ret;
int oob_required = oob ? 1 : 0;
ops->retlen = 0;
}
column = to & (mtd->writesize - 1);
- subpage = column || (writelen & (mtd->writesize - 1));
-
- if (subpage && oob)
- return -EINVAL;
chipnr = (int)(to >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
/* We still need to erase leftover OOB data */
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
-
- ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
- cached, (ops->mode == MTD_OPS_RAW));
+ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+ oob_required, page, cached,
+ (ops->mode == MTD_OPS_RAW));
if (ret)
break;
chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
}
-/**
- * multi_erase_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function. Erase 4 consecutive blocks.
- */
-static void multi_erase_cmd(struct mtd_info *mtd, int page)
-{
- struct nand_chip *chip = mtd->priv;
- /* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
/**
* nand_erase - [MTD Interface] erase block(s)
* @mtd: MTD device structure
return nand_erase_nand(mtd, instr, 0);
}
-#define BBT_PAGE_MASK 0xffffff3f
/**
* nand_erase_nand - [INTERN] erase block(s)
* @mtd: MTD device structure
{
int page, status, pages_per_block, ret, chipnr;
struct nand_chip *chip = mtd->priv;
- loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
- unsigned int bbt_masked_page = 0xffffffff;
loff_t len;
pr_debug("%s: start = 0x%012llx, len = %llu\n",
goto erase_exit;
}
- /*
- * If BBT requires refresh, set the BBT page mask to see if the BBT
- * should be rewritten. Otherwise the mask is set to 0xffffffff which
- * can not be matched. This is also done when the bbt is actually
- * erased to avoid recursive updates.
- */
- if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
- bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
-
/* Loop through the pages */
len = instr->len;
goto erase_exit;
}
- /*
- * If BBT requires refresh, set the BBT rewrite flag to the
- * page being erased.
- */
- if (bbt_masked_page != 0xffffffff &&
- (page & BBT_PAGE_MASK) == bbt_masked_page)
- rewrite_bbt[chipnr] =
- ((loff_t)page << chip->page_shift);
-
/* Increment page address and decrement length */
len -= (1 << chip->phys_erase_shift);
page += pages_per_block;
chipnr++;
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
-
- /*
- * If BBT requires refresh and BBT-PERCHIP, set the BBT
- * page mask to see if this BBT should be rewritten.
- */
- if (bbt_masked_page != 0xffffffff &&
- (chip->bbt_td->options & NAND_BBT_PERCHIP))
- bbt_masked_page = chip->bbt_td->pages[chipnr] &
- BBT_PAGE_MASK;
}
}
instr->state = MTD_ERASE_DONE;
if (!ret)
mtd_erase_callback(instr);
- /*
- * If BBT requires refresh and erase was successful, rewrite any
- * selected bad block tables.
- */
- if (bbt_masked_page == 0xffffffff || ret)
- return ret;
-
- for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
- if (!rewrite_bbt[chipnr])
- continue;
- /* Update the BBT for chip */
- pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
- __func__, chipnr, rewrite_bbt[chipnr],
- chip->bbt_td->pages[chipnr]);
- nand_update_bbt(mtd, rewrite_bbt[chipnr]);
- }
-
/* Return more or less happy */
return ret;
}
chip->onfi_version = 20;
else if (val & (1 << 1))
chip->onfi_version = 10;
- else
- chip->onfi_version = 0;
if (!chip->onfi_version) {
pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
}
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ mtd->writesize = type->pagesize;
+ mtd->erasesize = type->erasesize;
+ mtd->oobsize = type->oobsize;
+
+ chip->cellinfo = id_data[2];
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+ chip->options |= type->options;
+
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ return true;
+ }
+ return false;
+}
+
/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
if (!type)
type = nand_flash_ids;
- for (; type->name != NULL; type++)
- if (*dev_id == type->id)
- break;
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+ goto ident_done;
+ } else if (*dev_id == type->dev_id) {
+ break;
+ }
+ }
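In other words, the detection loop now tries the most specific match first: full-ID entries are compared against the raw ID bytes read from the chip, and only entries without an id_len fall back to the traditional single-byte device-ID comparison. Conceptually (simplified, not the literal kernel code):

	for (; type->name != NULL; type++) {
		if (type->id_len) {
			/* full-ID entry: compare id_len bytes of the ID */
			if (!memcmp(type->id, id_data, type->id_len))
				break;
		} else if (*dev_id == type->dev_id) {
			/* legacy entry: single device-ID byte */
			break;
		}
	}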
chip->onfi_version = 0;
if (!type->name || !type->pagesize) {
}
chip->badblockbits = 8;
-
- /* Check for AND chips with 4 page planes */
- if (chip->options & NAND_4PAGE_ARRAY)
- chip->erase_cmd = multi_erase_cmd;
- else
- chip->erase_cmd = single_erase_cmd;
+ chip->erase_cmd = single_erase_cmd;
/* Do not replace user supplied command function! */
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->ecc.read_oob = nand_read_oob_std;
if (!chip->ecc.write_oob)
chip->ecc.write_oob = nand_write_oob_std;
+ if (!chip->ecc.read_subpage)
+ chip->ecc.read_subpage = nand_read_subpage;
+ if (!chip->ecc.write_subpage)
+ chip->ecc.write_subpage = nand_write_subpage_hwecc;
case NAND_ECC_HW_SYNDROME:
if ((!chip->ecc.calculate || !chip->ecc.correct ||
*/
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
- .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
- .offs = 0x20,
- .len = 6,
- .pattern = scan_agand_pattern
-};
-
/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
{
struct nand_chip *this = mtd->priv;
- /*
- * Default for AG-AND. We must use a flash based bad block table as the
- * devices have factory marked _good_ blocks. Erasing those blocks
- * leads to loss of the good / bad information, so we _must_ store this
- * information in a good / bad table during startup.
- */
- if (this->options & NAND_IS_AND) {
- /* Use the default pattern descriptors */
- if (!this->bbt_td) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- }
- this->bbt_options |= NAND_BBT_USE_FLASH;
- return nand_scan_bbt(mtd, &agand_flashbased);
- }
-
/* Is a flash based bad block table requested? */
if (this->bbt_options & NAND_BBT_USE_FLASH) {
/* Use the default pattern descriptors */
*/
#include <linux/module.h>
#include <linux/mtd/nand.h>
-/*
-* Chip ID list
-*
-* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-* options
-*
-* Pagesize; 0, 256, 512
-* 0 get this information from the extended chip ID
-+ 256 256 Byte page size
-* 512 512 Byte page size
-*/
-struct nand_flash_dev nand_flash_ids[] = {
+#include <linux/sizes.h>
+
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
#define SP_OPTIONS NAND_NEED_READRDY
#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
-#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
- {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS},
- {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS},
- {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS},
- {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS},
- {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS},
- {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS},
-
- {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS},
- {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16},
- {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16},
-#endif
-
- {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS},
- {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS},
- {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16},
- {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16},
-
- {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS},
- {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS},
- {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16},
- {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16},
-
- {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS},
- {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16},
-
- {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS},
- {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS},
- {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16},
-
- {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS},
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+ /*
+ * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
/*
- * These are the new chips with large page size. The pagesize and the
- * erasesize is determined from the extended id bytes
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
*/
-#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
-#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
/* 512 Megabit */
- {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
- {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
- {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
/* 1 Gigabit */
- {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
- {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
- {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
- {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
/* 2 Gigabit */
- {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
- {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS},
- {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16},
- {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
/* 4 Gigabit */
- {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS},
- {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS},
- {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16},
- {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
/* 8 Gigabit */
- {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS},
- {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS},
- {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16},
- {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
/* 16 Gigabit */
- {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS},
- {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS},
- {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
- {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
/* 32 Gigabit */
- {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS},
- {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS},
- {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16},
- {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
/* 64 Gigabit */
- {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS},
- {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS},
- {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16},
- {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
/* 128 Gigabit */
- {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS},
- {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS},
- {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16},
- {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
/* 256 Gigabit */
- {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS},
- {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS},
- {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16},
- {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
/* 512 Gigabit */
- {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS},
- {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS},
- {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16},
- {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16},
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
- /*
- * Renesas AND 1 Gigabit. Those chips do not support extended id and
- * have a strange page/block layout ! The chosen minimum erasesize is
- * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
- * planes 1 block = 2 pages, but due to plane arrangement the blocks
- * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
- * increase the eraseblock size so we chose a combined one which can be
- * erased in one go There are more speed improvements for reads and
- * writes possible, but not implemented now
- */
- {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
-
- {NULL,}
+ {NULL}
};
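To decode the positional initializers in the table above: struct nand_flash_dev grows an id[] byte array plus id_len and oobsize fields in this series, and the two helper macros fill in the remaining members. A rough sketch of the layout as used here (the authoritative definition is in include/linux/mtd/nand.h):

	struct nand_flash_dev {
		char *name;
		union {
			struct {
				uint8_t mfr_id;
				uint8_t dev_id;
			};
			uint8_t id[NAND_MAX_ID_LEN];
		};
		unsigned int pagesize;
		unsigned int chipsize;	/* in MiB */
		unsigned int erasesize;
		unsigned int options;
		uint16_t id_len;	/* valid ID bytes, full-ID entries only */
		uint16_t oobsize;	/* OOB size, full-ID entries only */
	};

So the TC58NVG2S0F line reads: 4 KiB pages, a 512 MiB chip, 256 KiB eraseblocks, no extra options, an 8-byte ID and 224 bytes of OOB per page. LEGACY_ID_NAND() supplies the single device ID together with (judging by the converted entries) a fixed 512-byte page size, while EXTENDED_ID_NAND() leaves page and eraseblock size unset so they are decoded from the extended ID bytes, as the comment above the large-page section says.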
-/*
-* Manufacturer ID list
-*/
+/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_TOSHIBA, "Toshiba"},
{NAND_MFR_SAMSUNG, "Samsung"},
#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
#define STATE_CMD_STATUS 0x00000007 /* read status */
-#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
#define STATE_CMD_READID 0x0000000A /* read ID */
#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
#define NS_OPER_STATES 6 /* Maximum number of states in operation */
#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
-#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
-#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */
/* Remove action bits from state */
#define NS_STATE(x) ((x) & ~ACTION_MASK)
{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
/* Read status */
{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
- /* Read multi-plane status */
- {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
/* Read ID */
{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
/* Large page devices read page */
ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
ns->options = 0;
- if (ns->geom.pgsz == 256) {
- ns->options |= OPT_PAGE256;
- }
- else if (ns->geom.pgsz == 512) {
+ if (ns->geom.pgsz == 512) {
ns->options |= OPT_PAGE512;
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
}
/* Detect how many ID bytes the NAND chip outputs */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (second_id_byte != nand_flash_ids[i].id)
- continue;
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (second_id_byte != nand_flash_ids[i].dev_id)
+ continue;
}
if (ns->busw == 16)
return "STATE_CMD_ERASE1";
case STATE_CMD_STATUS:
return "STATE_CMD_STATUS";
- case STATE_CMD_STATUS_M:
- return "STATE_CMD_STATUS_M";
case STATE_CMD_SEQIN:
return "STATE_CMD_SEQIN";
case STATE_CMD_READID:
case NAND_CMD_RNDOUTSTART:
return 0;
- case NAND_CMD_STATUS_MULTI:
default:
return 1;
}
return STATE_CMD_ERASE1;
case NAND_CMD_STATUS:
return STATE_CMD_STATUS;
- case NAND_CMD_STATUS_MULTI:
- return STATE_CMD_STATUS_M;
case NAND_CMD_SEQIN:
return STATE_CMD_SEQIN;
case NAND_CMD_READID:
nand->geom.idbytes = 2;
nand->regs.status = NS_STATUS_OK(nand);
nand->nxstate = STATE_UNKNOWN;
- nand->options |= OPT_PAGE256; /* temporary value */
+ nand->options |= OPT_PAGE512; /* temporary value */
nand->ids[0] = first_id_byte;
nand->ids[1] = second_id_byte;
nand->ids[2] = third_id_byte;
case NAND_CMD_SEQIN:
case NAND_CMD_RNDIN:
case NAND_CMD_STATUS:
- case NAND_CMD_DEPLETE1:
- return;
-
- case NAND_CMD_STATUS_ERROR:
- case NAND_CMD_STATUS_ERROR0:
- case NAND_CMD_STATUS_ERROR1:
- case NAND_CMD_STATUS_ERROR2:
- case NAND_CMD_STATUS_ERROR3:
- udelay(chip->chip_delay);
return;
case NAND_CMD_RESET:
int status, state = this->state;
if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
+ timeo += msecs_to_jiffies(400);
else
- timeo += (HZ * 20) / 1000;
+ timeo += msecs_to_jiffies(20);
writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
pdev = of_find_device_by_node(elm_node);
info->elm_dev = &pdev->dev;
- elm_config(info->elm_dev, bch_type);
- info->is_elm_used = true;
+
+ if (elm_config(info->elm_dev, bch_type) == 0)
+ info->is_elm_used = true;
}
if (info->is_elm_used && (mtd->writesize <= 4096)) {
},
};
-static int __init orion_nand_init(void)
-{
- return platform_driver_probe(&orion_nand_driver, orion_nand_probe);
-}
-
-static void __exit orion_nand_exit(void)
-{
- platform_driver_unregister(&orion_nand_driver);
-}
-
-module_init(orion_nand_init);
-module_exit(orion_nand_exit);
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tzachi Perelstein");
+++ /dev/null
-/*
- * drivers/mtd/nand/ppchameleonevb.c
- *
- * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash devices found on the
- * PPChameleon/PPChameleonEVB system.
- * PPChameleon options (autodetected):
- * - BA model: no NAND
- * - ME model: 32MB (Samsung K9F5608U0B)
- * - HI model: 128MB (Samsung K9F1G08UOM)
- * PPChameleonEVB options:
- * - 32MB (Samsung K9F5608U0B)
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <platforms/PPChameleonEVB.h>
-
-#undef USE_READY_BUSY_PIN
-#define USE_READY_BUSY_PIN
-/* see datasheets (tR) */
-#define NAND_BIG_DELAY_US 25
-#define NAND_SMALL_DELAY_US 10
-
-/* handy sizes */
-#define SZ_4M 0x00400000
-#define NAND_SMALL_SIZE 0x02000000
-#define NAND_MTD_NAME "ppchameleon-nand"
-#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
-
-/* GPIO pins used to drive NAND chip mounted on processor module */
-#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
-#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
-#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
-#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
-/* GPIO pins used to drive NAND chip mounted on EVB */
-#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
-#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
-#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
-#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
-
-/*
- * MTD structure for PPChameleonEVB board
- */
-static struct mtd_info *ppchameleon_mtd = NULL;
-static struct mtd_info *ppchameleonevb_mtd = NULL;
-
-/*
- * Module stuff
- */
-static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
-static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
-
-#ifdef MODULE
-module_param(ppchameleon_fio_pbase, ulong, 0);
-module_param(ppchameleonevb_fio_pbase, ulong, 0);
-#else
-__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
-__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
-#endif
-
-/*
- * Define static partitions for flash devices
- */
-static struct mtd_partition partition_info_hi[] = {
- { .name = "PPChameleon HI Nand Flash",
- .offset = 0,
- .size = 128 * 1024 * 1024
- }
-};
-
-static struct mtd_partition partition_info_me[] = {
- { .name = "PPChameleon ME Nand Flash",
- .offset = 0,
- .size = 32 * 1024 * 1024
- }
-};
-
-static struct mtd_partition partition_info_evb[] = {
- { .name = "PPChameleonEVB Nand Flash",
- .offset = 0,
- .size = 32 * 1024 * 1024
- }
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific access to control-lines
- */
-static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
- switch (cmd) {
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
- break;
- }
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
- switch (cmd) {
- case NAND_CTL_SETCLE:
- MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRCLE:
- MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETALE:
- MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRALE:
- MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_SETNCE:
- MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- case NAND_CTL_CLRNCE:
- MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
- break;
- }
- }
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-#ifdef USE_READY_BUSY_PIN
-/*
- * read device ready pin
- */
-static int ppchameleon_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-
-static int ppchameleonevb_device_ready(struct mtd_info *minfo)
-{
- if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
- return 1;
- return 0;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init ppchameleonevb_init(void)
-{
- struct nand_chip *this;
- void __iomem *ppchameleon_fio_base;
- void __iomem *ppchameleonevb_fio_base;
-
- /*********************************
- * Processor module NAND (if any) *
- *********************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ppchameleon_mtd) {
- printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
- if (!ppchameleon_fio_base) {
- printk("ioremap PPChameleon NAND flash failed\n");
- kfree(ppchameleon_mtd);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ppchameleon_mtd[1]);
-
- /* Initialize structures */
- memset(ppchameleon_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleon_mtd->priv = this;
- ppchameleon_mtd->owner = THIS_MODULE;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_01
- CLE GPIO_02
- ALE GPIO_03
- R/B GPIO_04
- */
- /* output select */
- out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF);
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned *)GPIO0_TCR,
- in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF);
- /* high-impedecence */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned *)GPIO0_ISR1H,
- (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleon_fio_base;
- this->IO_ADDR_W = ppchameleon_fio_base;
- this->cmd_ctrl = ppchameleon_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleon_device_ready;
-#endif
- this->chip_delay = NAND_BIG_DELAY_US;
- /* ECC mode */
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device (it could not be mounted) */
- if (nand_scan(ppchameleon_mtd, 1)) {
- iounmap((void *)ppchameleon_fio_base);
- ppchameleon_fio_base = NULL;
- kfree(ppchameleon_mtd);
- goto nand_evb_init;
- }
-#ifndef USE_READY_BUSY_PIN
- /* Adjust delay if necessary */
- if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
- this->chip_delay = NAND_SMALL_DELAY_US;
-#endif
-
- ppchameleon_mtd->name = "ppchameleon-nand";
-
- /* Register the partitions */
- mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
- ppchameleon_mtd->size == NAND_SMALL_SIZE ?
- partition_info_me : partition_info_hi,
- NUM_PARTITIONS);
-
- nand_evb_init:
- /****************************
- * EVB NAND (always present) *
- ****************************/
- /* Allocate memory for MTD device structure and private data */
- ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ppchameleonevb_mtd) {
- printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -ENOMEM;
- }
-
- /* map physical address */
- ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
- if (!ppchameleonevb_fio_base) {
- printk("ioremap PPChameleonEVB NAND flash failed\n");
- kfree(ppchameleonevb_mtd);
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -EIO;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ppchameleonevb_mtd[1]);
-
- /* Initialize structures */
- memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ppchameleonevb_mtd->priv = this;
-
- /* Initialize GPIOs */
- /* Pin mapping for NAND chip */
- /*
- CE GPIO_14
- CLE GPIO_15
- ALE GPIO_16
- R/B GPIO_31
- */
- /* output select */
- out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF);
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0);
- out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF);
- /* enable output driver */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
- NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
- /* three-state select */
- out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC);
- /* high-impedecence */
- out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
- /* input select */
- out_be32((volatile unsigned *)GPIO0_ISR1L,
- (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
-#endif
-
- /* insert callbacks */
- this->IO_ADDR_R = ppchameleonevb_fio_base;
- this->IO_ADDR_W = ppchameleonevb_fio_base;
- this->cmd_ctrl = ppchameleonevb_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
- this->dev_ready = ppchameleonevb_device_ready;
-#endif
- this->chip_delay = NAND_SMALL_DELAY_US;
-
- /* ECC mode */
- this->ecc.mode = NAND_ECC_SOFT;
-
- /* Scan to find existence of the device */
- if (nand_scan(ppchameleonevb_mtd, 1)) {
- iounmap((void *)ppchameleonevb_fio_base);
- kfree(ppchameleonevb_mtd);
- if (ppchameleon_fio_base)
- iounmap(ppchameleon_fio_base);
- return -ENXIO;
- }
-
- ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
-
- /* Register the partitions */
- mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
- ppchameleon_mtd->size == NAND_SMALL_SIZE ?
- partition_info_me : partition_info_hi,
- NUM_PARTITIONS);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ppchameleonevb_init);
-
-/*
- * Clean up routine
- */
-static void __exit ppchameleonevb_cleanup(void)
-{
- struct nand_chip *this;
-
- /* Release resources, unregister device(s) */
- nand_release(ppchameleon_mtd);
- nand_release(ppchameleonevb_mtd);
-
- /* Release iomaps */
- this = (struct nand_chip *) &ppchameleon_mtd[1];
- iounmap((void *) this->IO_ADDR_R);
- this = (struct nand_chip *) &ppchameleonevb_mtd[1];
- iounmap((void *) this->IO_ADDR_R);
-
- /* Free the MTD device structure */
- kfree (ppchameleon_mtd);
- kfree (ppchameleonevb_mtd);
-}
-module_exit(ppchameleonevb_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
-MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
}
pxa3xx_flash_ids[0].name = f->name;
- pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
+ pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
pxa3xx_flash_ids[0].pagesize = f->page_size;
chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
+++ /dev/null
-/*
- * drivers/mtd/nand/rtc_from4.c
- *
- * Copyright (C) 2004 Red Hat, Inc.
- *
- * Derived from drivers/mtd/nand/spia.c
- * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the AG-AND flash device found on the
- * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
- * which utilizes the Renesas HN29V1G91T-30 part.
- * This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device.
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/rslib.h>
-#include <linux/bitrev.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-/*
- * MTD structure for Renesas board
- */
-static struct mtd_info *rtc_from4_mtd = NULL;
-
-#define RTC_FROM4_MAX_CHIPS 2
-
-/* HS77x9 processor register defines */
-#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60))
-#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62))
-#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64))
-#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66))
-#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68))
-#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C))
-#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80))
-
-/*
- * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
- */
-/* Address where flash is mapped */
-#define RTC_FROM4_FIO_BASE 0x14000000
-
-/* CLE and ALE are tied to address lines 5 & 4, respectively */
-#define RTC_FROM4_CLE (1 << 5)
-#define RTC_FROM4_ALE (1 << 4)
-
-/* address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000)
-#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000)
-#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000)
-/* mask address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
-
-/* FPGA status register for checking device ready (bit zero) */
-#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
-#define RTC_FROM4_DEVICE_READY 0x0001
-
-/* FPGA Reed-Solomon ECC Control register */
-
-#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
-#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7)
-#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6)
-#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5)
-
-/* FPGA Reed-Solomon ECC code base */
-#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
-#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
-
-/* FPGA Reed-Solomon ECC check register */
-#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
-#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7)
-
-#define ERR_STAT_ECC_AVAILABLE 0x20
-
-/* Undefine for software ECC */
-#define RTC_FROM4_HWECC 1
-
-/* Define as 1 for no virtual erase blocks (in JFFS2) */
-#define RTC_FROM4_NO_VIRTBLOCKS 0
-
-/*
- * Module stuff
- */
-static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
-
-static const struct mtd_partition partition_info[] = {
- {
- .name = "Renesas flash partition 1",
- .offset = 0,
- .size = MTDPART_SIZ_FULL},
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- * hardware specific flash bbt decriptors
- * Note: this is to allow debugging by disabling
- * NAND_BBT_CREATE and/or NAND_BBT_WRITE
- *
- */
-static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 40,
- .len = 4,
- .veroffs = 44,
- .maxblocks = 4,
- .pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
- .offs = 40,
- .len = 4,
- .veroffs = 44,
- .maxblocks = 4,
- .pattern = mirror_pattern
-};
-
-#ifdef RTC_FROM4_HWECC
-
-/* the Reed Solomon control structure */
-static struct rs_control *rs_decoder;
-
-/*
- * hardware specific Out Of Band information
- */
-static struct nand_ecclayout rtc_from4_nand_oobinfo = {
- .eccbytes = 32,
- .eccpos = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31},
- .oobfree = {{32, 32}}
-};
-
-#endif
-
-/*
- * rtc_from4_hwcontrol - hardware specific access to control-lines
- * @mtd: MTD device structure
- * @cmd: hardware control command
- *
- * Address lines (A5 and A4) are used to control Command and Address Latch
- * Enable on this board, so set the read/write address appropriately.
- *
- * Chip Enable is also controlled by the Chip Select (CS5) and
- * Address lines (A24-A22), so no action is required here.
- *
- */
-static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct nand_chip *chip = (mtd->priv);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE);
- else
- writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE);
-}
-
-/*
- * rtc_from4_nand_select_chip - hardware specific chip select
- * @mtd: MTD device structure
- * @chip: Chip to select (0 == slot 3, 1 == slot 4)
- *
- * The chip select is based on address lines A24-A22.
- * This driver uses flash slots 3 and 4 (A23-A22).
- *
- */
-static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
-
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
-
- switch (chip) {
-
- case 0: /* select slot 3 chip */
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
- break;
- case 1: /* select slot 4 chip */
- this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
- this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
- break;
-
- }
-}
-
-/*
- * rtc_from4_nand_device_ready - hardware specific ready/busy check
- * @mtd: MTD device structure
- *
- * This board provides the Ready/Busy state in the status register
- * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal.
- *
- */
-static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
-{
- unsigned short status;
-
- status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
-
- return (status & RTC_FROM4_DEVICE_READY);
-
-}
-
-/*
- * deplete - code to perform device recovery in case there was a power loss
- * @mtd: MTD device structure
- * @chip: Chip to select (0 == slot 3, 1 == slot 4)
- *
- * If there was a sudden loss of power during an erase operation, a
- * "device recovery" operation must be performed when power is restored
- * to ensure correct operation. This routine performs the required steps
- * for the requested chip.
- *
- * See page 86 of the data sheet for details.
- *
- */
-static void deplete(struct mtd_info *mtd, int chip)
-{
- struct nand_chip *this = mtd->priv;
-
- /* wait until device is ready */
- while (!this->dev_ready(mtd)) ;
-
- this->select_chip(mtd, chip);
-
- /* Send the commands for device recovery, phase 1 */
- this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
- this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
- /* Send the commands for device recovery, phase 2 */
- this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
- this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
-}
-
-#ifdef RTC_FROM4_HWECC
-/*
- * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
- * @mtd: MTD device structure
- * @mode: I/O mode; read or write
- *
- * enable hardware ECC for data read or write
- *
- */
-static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
-{
- volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
- unsigned short status;
-
- switch (mode) {
- case NAND_ECC_READ:
- status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E;
-
- *rs_ecc_ctl = status;
- break;
-
- case NAND_ECC_READSYN:
- status = 0x00;
-
- *rs_ecc_ctl = status;
- break;
-
- case NAND_ECC_WRITE:
- status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E;
-
- *rs_ecc_ctl = status;
- break;
-
- default:
- BUG();
- break;
- }
-
-}
-
-/*
- * rtc_from4_calculate_ecc - hardware specific code to read ECC code
- * @mtd: MTD device structure
- * @dat: buffer containing the data to generate ECC codes
- * @ecc_code ECC codes calculated
- *
- * The ECC code is calculated by the FPGA. All we have to do is read the values
- * from the FPGA registers.
- *
- * Note: We read from the inverted registers, since data is inverted before
- * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code
- *
- */
-static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
-{
- volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
- unsigned short value;
- int i;
-
- for (i = 0; i < 8; i++) {
- value = *rs_eccn;
- ecc_code[i] = (unsigned char)value;
- rs_eccn++;
- }
- ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
-}
-
-/*
- * rtc_from4_correct_data - hardware specific code to correct data using ECC code
- * @mtd: MTD device structure
- * @buf: buffer containing the data to generate ECC codes
- * @ecc1 ECC codes read
- * @ecc2 ECC codes calculated
- *
- * The FPGA tells us fast, if there's an error or not. If no, we go back happy
- * else we read the ecc results from the fpga and call the rs library to decode
- * and hopefully correct the error.
- *
- */
-static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
-{
- int i, j, res;
- unsigned short status;
- uint16_t par[6], syn[6];
- uint8_t ecc[8];
- volatile unsigned short *rs_ecc;
-
- status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
-
- if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
- return 0;
- }
-
- /* Read the syndrome pattern from the FPGA and correct the bitorder */
- rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
- for (i = 0; i < 8; i++) {
- ecc[i] = bitrev8(*rs_ecc);
- rs_ecc++;
- }
-
- /* convert into 6 10bit syndrome fields */
- par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)];
- par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)];
- par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)];
- par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)];
- par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)];
- par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0);
-
- /* Convert to computable syndrome */
- for (i = 0; i < 6; i++) {
- syn[i] = par[0];
- for (j = 1; j < 6; j++)
- if (par[j] != rs_decoder->nn)
- syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
-
- /* Convert to index form */
- syn[i] = rs_decoder->index_of[syn[i]];
- }
-
- /* Let the library code do its magic. */
- res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
- if (res > 0) {
- pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
- }
- return res;
-}
-
-/**
- * rtc_from4_errstat - perform additional error status checks
- * @mtd: MTD device structure
- * @this: NAND chip structure
- * @state: state or the operation
- * @status: status code returned from read status
- * @page: startpage inside the chip, must be called with (page & this->pagemask)
- *
- * Perform additional error status checks on erase and write failures
- * to determine if errors are correctable. For this device, correctable
- * 1-bit errors on erase and write are considered acceptable.
- *
- * note: see pages 34..37 of data sheet for details.
- *
- */
-static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
- int state, int status, int page)
-{
- int er_stat = 0;
- int rtn, retlen;
- size_t len;
- uint8_t *buf;
- int i;
-
- this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
-
- if (state == FL_ERASING) {
-
- for (i = 0; i < 4; i++) {
- if (!(status & 1 << (i + 1)))
- continue;
- this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
- -1, -1);
- rtn = this->read_byte(mtd);
- this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
- /* err_ecc_not_avail */
- if (!(rtn & ERR_STAT_ECC_AVAILABLE))
- er_stat |= 1 << (i + 1);
- }
-
- } else if (state == FL_WRITING) {
-
- unsigned long corrected = mtd->ecc_stats.corrected;
-
- /* single bank write logic */
- this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
- rtn = this->read_byte(mtd);
- this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
- if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
- /* err_ecc_not_avail */
- er_stat |= 1 << 1;
- goto out;
- }
-
- len = mtd->writesize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- er_stat = 1;
- goto out;
- }
-
- /* recovery read */
- rtn = nand_do_read(mtd, page, len, &retlen, buf);
-
- /* if read failed or > 1-bit error corrected */
- if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
- er_stat |= 1 << 1;
- kfree(buf);
- }
-out:
- rtn = status;
- if (er_stat == 0) { /* if ECC is available */
- rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
- }
-
- return rtn;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init rtc_from4_init(void)
-{
- struct nand_chip *this;
- unsigned short bcr1, bcr2, wcr2;
- int i;
- int ret;
-
- /* Allocate memory for MTD device structure and private data */
- rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!rtc_from4_mtd) {
- printk("Unable to allocate Renesas NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&rtc_from4_mtd[1]);
-
- /* Initialize structures */
- memset(rtc_from4_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- rtc_from4_mtd->priv = this;
- rtc_from4_mtd->owner = THIS_MODULE;
-
- /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */
- bcr1 = *SH77X9_BCR1 & ~0x0002;
- bcr1 |= 0x0002;
- *SH77X9_BCR1 = bcr1;
-
- /* set */
- bcr2 = *SH77X9_BCR2 & ~0x0c00;
- bcr2 |= 0x0800;
- *SH77X9_BCR2 = bcr2;
-
- /* set area 5 wait states */
- wcr2 = *SH77X9_WCR2 & ~0x1c00;
- wcr2 |= 0x1c00;
- *SH77X9_WCR2 = wcr2;
-
- /* Set address of NAND IO lines */
- this->IO_ADDR_R = rtc_from4_fio_base;
- this->IO_ADDR_W = rtc_from4_fio_base;
- /* Set address of hardware control function */
- this->cmd_ctrl = rtc_from4_hwcontrol;
- /* Set address of chip select function */
- this->select_chip = rtc_from4_nand_select_chip;
- /* command delay time (in us) */
- this->chip_delay = 100;
- /* return the status of the Ready/Busy line */
- this->dev_ready = rtc_from4_nand_device_ready;
-
-#ifdef RTC_FROM4_HWECC
- printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
-
- this->ecc.mode = NAND_ECC_HW_SYNDROME;
- this->ecc.size = 512;
- this->ecc.bytes = 8;
- this->ecc.strength = 3;
- /* return the status of extra status and ECC checks */
- this->errstat = rtc_from4_errstat;
- /* set the nand_oobinfo to support FPGA H/W error detection */
- this->ecc.layout = &rtc_from4_nand_oobinfo;
- this->ecc.hwctl = rtc_from4_enable_hwecc;
- this->ecc.calculate = rtc_from4_calculate_ecc;
- this->ecc.correct = rtc_from4_correct_data;
-
- /* We could create the decoder on demand, if memory is a concern.
- * This way we have it handy, if an error happens
- *
- * Symbolsize is 10 (bits)
- * Primitve polynomial is x^10+x^3+1
- * first consecutive root is 0
- * primitve element to generate roots = 1
- * generator polinomial degree = 6
- */
- rs_decoder = init_rs(10, 0x409, 0, 1, 6);
- if (!rs_decoder) {
- printk(KERN_ERR "Could not create a RS decoder\n");
- ret = -ENOMEM;
- goto err_1;
- }
-#else
- printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
-
- this->ecc.mode = NAND_ECC_SOFT;
-#endif
-
- /* set the bad block tables to support debugging */
- this->bbt_td = &rtc_from4_bbt_main_descr;
- this->bbt_md = &rtc_from4_bbt_mirror_descr;
-
- /* Scan to find existence of the device */
- if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
- ret = -ENXIO;
- goto err_2;
- }
-
- /* Perform 'device recovery' for each chip in case there was a power loss. */
- for (i = 0; i < this->numchips; i++) {
- deplete(rtc_from4_mtd, i);
- }
-
-#if RTC_FROM4_NO_VIRTBLOCKS
- /* use a smaller erase block to minimize wasted space when a block is bad */
- /* note: this uses eight times as much RAM as using the default and makes */
- /* mounts take four times as long. */
- rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS;
-#endif
-
- /* Register the partitions */
- ret = mtd_device_register(rtc_from4_mtd, partition_info,
- NUM_PARTITIONS);
- if (ret)
- goto err_3;
-
- /* Return happy */
- return 0;
-err_3:
- nand_release(rtc_from4_mtd);
-err_2:
- free_rs(rs_decoder);
-err_1:
- kfree(rtc_from4_mtd);
- return ret;
-}
-
-module_init(rtc_from4_init);
-
-/*
- * Clean up routine
- */
-static void __exit rtc_from4_cleanup(void)
-{
- /* Release resource, unregister partitions */
- nand_release(rtc_from4_mtd);
-
- /* Free the MTD device structure */
- kfree(rtc_from4_mtd);
-
-#ifdef RTC_FROM4_HWECC
- /* Free the reed solomon resources */
- if (rs_decoder) {
- free_rs(rs_decoder);
- }
-#endif
-}
-
-module_exit(rtc_from4_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
-MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
return pdata;
}
#else /* CONFIG_OF */
-#define of_flctl_match NULL
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
return NULL;
.driver = {
.name = "sh_flctl",
.owner = THIS_MODULE,
- .of_match_table = of_flctl_match,
+ .of_match_table = of_match_ptr(of_flctl_match),
},
};
-static int __init flctl_nand_init(void)
-{
- return platform_driver_probe(&flctl_driver, flctl_probe);
-}
-
-static void __exit flctl_nand_cleanup(void)
-{
- platform_driver_unregister(&flctl_driver);
-}
-
-module_init(flctl_nand_init);
-module_exit(flctl_nand_cleanup);
+module_platform_driver_probe(flctl_driver, flctl_probe);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/module.h>
+#include <linux/sizes.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
return error;
}
-
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
- {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
- {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
- {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
- {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
- {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
- {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
- {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
- {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
- {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
- {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
- {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
- {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
- {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
- {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
- {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
- {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
- {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
- {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
- {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
- {NULL,}
+ LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
+ LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0),
+ LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+ {NULL}
};
static struct nand_flash_dev nand_xd_flash_ids[] = {
-
- {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
- {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
- {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
- {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
- {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
- {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
- {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
- {NULL,}
+ LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
+ LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+ LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+ {NULL}
};
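Both ID tables above move from positional struct nand_flash_dev initializers to the LEGACY_ID_NAND() helper, with erase sizes spelled via the SZ_* constants that the newly added <linux/sizes.h> include provides. Mapping one SmartMedia entry between the two forms (0x2000 == SZ_8K and 0x4000 == SZ_16K; the 256-byte-page 1MiB/2MiB entries do not reappear in the new table, presumably because the legacy macro is meant for 512-byte-page parts):

    /* old positional form: name, dev id, page size, chip size (MiB),
     * erase size, options */
    {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},

    /* new macro form: name, dev id, chip size (MiB), erase size, options */
    LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),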
int sm_register_device(struct mtd_info *mtd, int smartmedia)
},
};
-static int __init txx9ndfmc_init(void)
-{
- return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe);
-}
-
-static void __exit txx9ndfmc_exit(void)
-{
- platform_driver_unregister(&txx9ndfmc_driver);
-}
-
-module_init(txx9ndfmc_init);
-module_exit(txx9ndfmc_exit);
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
while ((pp = of_get_next_child(node, pp))) {
const __be32 *reg;
int len;
+ int a_cells, s_cells;
reg = of_get_property(pp, "reg", &len);
if (!reg) {
continue;
}
- (*pparts)[i].offset = be32_to_cpu(reg[0]);
- (*pparts)[i].size = be32_to_cpu(reg[1]);
+ a_cells = of_n_addr_cells(pp);
+ s_cells = of_n_size_cells(pp);
+ (*pparts)[i].offset = of_read_number(reg, a_cells);
+ (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
partname = of_get_property(pp, "label", &len);
if (!partname)
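The ofpart hunk above stops hard-coding a one-cell offset and one-cell size when parsing a partition's "reg" property and instead honours the #address-cells/#size-cells in effect for the node, so partitions with 64-bit offsets or sizes are read correctly. Conceptually, of_read_number() assembles the value from big-endian cells like this (a sketch of the helper's behaviour, not code from this driver):

    static u64 read_cells(const __be32 *cell, int cells)
    {
            u64 r = 0;

            while (cells--)
                    r = (r << 32) | be32_to_cpu(*cell++);
            return r;
    }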
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
- select HAVE_MTD_OTP
help
One Block of the NAND Flash Array memory is reserved as
a One-Time Programmable Block memory area.
And more recent chips
-config MTD_ONENAND_SIM
- tristate "OneNAND simulator support"
- help
- The simulator may simulate various OneNAND flash chips for the
- OneNAND MTD layer.
-
endif # MTD_ONENAND
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
-# Simulator
-obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
-
onenand-objs = onenand_base.o onenand_bbt.o
},
};
-static int __init omap2_onenand_init(void)
-{
- printk(KERN_INFO "OneNAND driver initializing\n");
- return platform_driver_register(&omap2_onenand_driver);
-}
-
-static void __exit omap2_onenand_exit(void)
-{
- platform_driver_unregister(&omap2_onenand_driver);
-}
-
-module_init(omap2_onenand_init);
-module_exit(omap2_onenand_exit);
+module_platform_driver(omap2_onenand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
+++ /dev/null
-/*
- * linux/drivers/mtd/onenand/onenand_sim.c
- *
- * The OneNAND simulator
- *
- * Copyright © 2005-2007 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- *
- * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
- * Flex-OneNAND simulator support
- * Copyright (C) Samsung Electronics, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/onenand.h>
-
-#include <linux/io.h>
-
-#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
-#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
-#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
-#endif
-
-#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
-
-#ifndef CONFIG_ONENAND_SIM_VERSION_ID
-#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
-#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
-#endif
-
-/* Initial boundary values for Flex-OneNAND Simulator */
-#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
-#endif
-
-#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
-#endif
-
-static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
-static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
-static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
-static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
-static int boundary[] = {
- CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
- CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
-};
-
-struct onenand_flash {
- void __iomem *base;
- void __iomem *data;
-};
-
-#define ONENAND_CORE(flash) (flash->data)
-#define ONENAND_CORE_SPARE(flash, this, offset) \
- ((flash->data) + (this->chipsize) + (offset >> 5))
-
-#define ONENAND_MAIN_AREA(this, offset) \
- (this->base + ONENAND_DATARAM + offset)
-
-#define ONENAND_SPARE_AREA(this, offset) \
- (this->base + ONENAND_SPARERAM + offset)
-
-#define ONENAND_GET_WP_STATUS(this) \
- (readw(this->base + ONENAND_REG_WP_STATUS))
-
-#define ONENAND_SET_WP_STATUS(v, this) \
- (writew(v, this->base + ONENAND_REG_WP_STATUS))
-
-/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (4096 + 128)
-static unsigned char *ffchars;
-
-#if CONFIG_FLEXONENAND
-#define PARTITION_NAME "Flex-OneNAND simulator partition"
-#else
-#define PARTITION_NAME "OneNAND simulator partition"
-#endif
-
-static struct mtd_partition os_partitions[] = {
- {
- .name = PARTITION_NAME,
- .offset = 0,
- .size = MTDPART_SIZ_FULL,
- },
-};
-
-/*
- * OneNAND simulator mtd
- */
-struct onenand_info {
- struct mtd_info mtd;
- struct mtd_partition *parts;
- struct onenand_chip onenand;
- struct onenand_flash flash;
-};
-
-static struct onenand_info *info;
-
-#define DPRINTK(format, args...) \
-do { \
- printk(KERN_DEBUG "%s[%d]: " format "\n", __func__, \
- __LINE__, ##args); \
-} while (0)
-
-/**
- * onenand_lock_handle - Handle Lock scheme
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Send lock command to OneNAND device.
- * The lock scheme depends on chip type.
- */
-static void onenand_lock_handle(struct onenand_chip *this, int cmd)
-{
- int block_lock_scheme;
- int status;
-
- status = ONENAND_GET_WP_STATUS(this);
- block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK);
-
- switch (cmd) {
- case ONENAND_CMD_UNLOCK:
- case ONENAND_CMD_UNLOCK_ALL:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this);
- break;
-
- case ONENAND_CMD_LOCK:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this);
- break;
-
- case ONENAND_CMD_LOCK_TIGHT:
- if (block_lock_scheme)
- ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this);
- else
- ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this);
- break;
-
- default:
- break;
- }
-}
-
-/**
- * onenand_bootram_handle - Handle BootRAM area
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Emulate BootRAM area. It is possible to do basic operation using BootRAM.
- */
-static void onenand_bootram_handle(struct onenand_chip *this, int cmd)
-{
- switch (cmd) {
- case ONENAND_CMD_READID:
- writew(manuf_id, this->base);
- writew(device_id, this->base + 2);
- writew(version_id, this->base + 4);
- break;
-
- default:
- /* REVIST: Handle other commands */
- break;
- }
-}
-
-/**
- * onenand_update_interrupt - Set interrupt register
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Update interrupt register. The status depends on command.
- */
-static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
-{
- int interrupt = ONENAND_INT_MASTER;
-
- switch (cmd) {
- case ONENAND_CMD_READ:
- case ONENAND_CMD_READOOB:
- interrupt |= ONENAND_INT_READ;
- break;
-
- case ONENAND_CMD_PROG:
- case ONENAND_CMD_PROGOOB:
- interrupt |= ONENAND_INT_WRITE;
- break;
-
- case ONENAND_CMD_ERASE:
- interrupt |= ONENAND_INT_ERASE;
- break;
-
- case ONENAND_CMD_RESET:
- interrupt |= ONENAND_INT_RESET;
- break;
-
- default:
- break;
- }
-
- writew(interrupt, this->base + ONENAND_REG_INTERRUPT);
-}
-
-/**
- * onenand_check_overwrite - Check if over-write happened
- * @dest: The destination pointer
- * @src: The source pointer
- * @count: The length to be check
- *
- * Returns: 0 on same, otherwise 1
- *
- * Compare the source with destination
- */
-static int onenand_check_overwrite(void *dest, void *src, size_t count)
-{
- unsigned int *s = (unsigned int *) src;
- unsigned int *d = (unsigned int *) dest;
- int i;
-
- count >>= 2;
- for (i = 0; i < count; i++)
- if ((*s++ ^ *d++) != 0)
- return 1;
-
- return 0;
-}
-
-/**
- * onenand_data_handle - Handle OneNAND Core and DataRAM
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- * @dataram: Which dataram used
- * @offset: The offset to OneNAND Core
- *
- * Copy data from OneNAND Core to DataRAM (read)
- * Copy data from DataRAM to OneNAND Core (write)
- * Erase the OneNAND Core (erase)
- */
-static void onenand_data_handle(struct onenand_chip *this, int cmd,
- int dataram, unsigned int offset)
-{
- struct mtd_info *mtd = &info->mtd;
- struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset, die = 0;
- void __iomem *src;
- void __iomem *dest;
- unsigned int i;
- static int pi_operation;
- int erasesize, rgn;
-
- if (dataram) {
- main_offset = mtd->writesize;
- spare_offset = mtd->oobsize;
- } else {
- main_offset = 0;
- spare_offset = 0;
- }
-
- if (pi_operation) {
- die = readw(this->base + ONENAND_REG_START_ADDRESS2);
- die >>= ONENAND_DDP_SHIFT;
- }
-
- switch (cmd) {
- case FLEXONENAND_CMD_PI_ACCESS:
- pi_operation = 1;
- break;
-
- case ONENAND_CMD_RESET:
- pi_operation = 0;
- break;
-
- case ONENAND_CMD_READ:
- src = ONENAND_CORE(flash) + offset;
- dest = ONENAND_MAIN_AREA(this, main_offset);
- if (pi_operation) {
- writew(boundary[die], this->base + ONENAND_DATARAM);
- break;
- }
- memcpy(dest, src, mtd->writesize);
- /* Fall through */
-
- case ONENAND_CMD_READOOB:
- src = ONENAND_CORE_SPARE(flash, this, offset);
- dest = ONENAND_SPARE_AREA(this, spare_offset);
- memcpy(dest, src, mtd->oobsize);
- break;
-
- case ONENAND_CMD_PROG:
- src = ONENAND_MAIN_AREA(this, main_offset);
- dest = ONENAND_CORE(flash) + offset;
- if (pi_operation) {
- boundary[die] = readw(this->base + ONENAND_DATARAM);
- break;
- }
- /* To handle partial write */
- for (i = 0; i < (1 << mtd->subpage_sft); i++) {
- int off = i * this->subpagesize;
- if (!memcmp(src + off, ffchars, this->subpagesize))
- continue;
- if (memcmp(dest + off, ffchars, this->subpagesize) &&
- onenand_check_overwrite(dest + off, src + off, this->subpagesize))
- printk(KERN_ERR "over-write happened at 0x%08x\n", offset);
- memcpy(dest + off, src + off, this->subpagesize);
- }
- /* Fall through */
-
- case ONENAND_CMD_PROGOOB:
- src = ONENAND_SPARE_AREA(this, spare_offset);
- /* Check all data is 0xff chars */
- if (!memcmp(src, ffchars, mtd->oobsize))
- break;
-
- dest = ONENAND_CORE_SPARE(flash, this, offset);
- if (memcmp(dest, ffchars, mtd->oobsize) &&
- onenand_check_overwrite(dest, src, mtd->oobsize))
- printk(KERN_ERR "OOB: over-write happened at 0x%08x\n",
- offset);
- memcpy(dest, src, mtd->oobsize);
- break;
-
- case ONENAND_CMD_ERASE:
- if (pi_operation)
- break;
-
- if (FLEXONENAND(this)) {
- rgn = flexonenand_region(mtd, offset);
- erasesize = mtd->eraseregions[rgn].erasesize;
- } else
- erasesize = mtd->erasesize;
-
- memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
- memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (erasesize >> 5));
- break;
-
- default:
- break;
- }
-}
-
-/**
- * onenand_command_handle - Handle command
- * @this: OneNAND device structure
- * @cmd: The command to be sent
- *
- * Emulate OneNAND command.
- */
-static void onenand_command_handle(struct onenand_chip *this, int cmd)
-{
- unsigned long offset = 0;
- int block = -1, page = -1, bufferram = -1;
- int dataram = 0;
-
- switch (cmd) {
- case ONENAND_CMD_UNLOCK:
- case ONENAND_CMD_LOCK:
- case ONENAND_CMD_LOCK_TIGHT:
- case ONENAND_CMD_UNLOCK_ALL:
- onenand_lock_handle(this, cmd);
- break;
-
- case ONENAND_CMD_BUFFERRAM:
- /* Do nothing */
- return;
-
- default:
- block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1);
- if (block & (1 << ONENAND_DDP_SHIFT)) {
- block &= ~(1 << ONENAND_DDP_SHIFT);
- /* The half of chip block */
- block += this->chipsize >> (this->erase_shift + 1);
- }
- if (cmd == ONENAND_CMD_ERASE)
- break;
-
- page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8);
- page = (page >> ONENAND_FPA_SHIFT);
- bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER);
- bufferram >>= ONENAND_BSA_SHIFT;
- bufferram &= ONENAND_BSA_DATARAM1;
- dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 1 : 0;
- break;
- }
-
- if (block != -1)
- offset = onenand_addr(this, block);
-
- if (page != -1)
- offset += page << this->page_shift;
-
- onenand_data_handle(this, cmd, dataram, offset);
-
- onenand_update_interrupt(this, cmd);
-}
-
-/**
- * onenand_writew - [OneNAND Interface] Emulate write operation
- * @value: value to write
- * @addr: address to write
- *
- * Write OneNAND register with value
- */
-static void onenand_writew(unsigned short value, void __iomem * addr)
-{
- struct onenand_chip *this = info->mtd.priv;
-
- /* BootRAM handling */
- if (addr < this->base + ONENAND_DATARAM) {
- onenand_bootram_handle(this, value);
- return;
- }
- /* Command handling */
- if (addr == this->base + ONENAND_REG_COMMAND)
- onenand_command_handle(this, value);
-
- writew(value, addr);
-}
-
-/**
- * flash_init - Initialize OneNAND simulator
- * @flash: OneNAND simulator data strucutres
- *
- * Initialize OneNAND simulator.
- */
-static int __init flash_init(struct onenand_flash *flash)
-{
- int density, size;
- int buffer_size;
-
- flash->base = kzalloc(131072, GFP_KERNEL);
- if (!flash->base) {
- printk(KERN_ERR "Unable to allocate base address.\n");
- return -ENOMEM;
- }
-
- density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
- density &= ONENAND_DEVICE_DENSITY_MASK;
- size = ((16 << 20) << density);
-
- ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
- if (!ONENAND_CORE(flash)) {
- printk(KERN_ERR "Unable to allocate nand core address.\n");
- kfree(flash->base);
- return -ENOMEM;
- }
-
- memset(ONENAND_CORE(flash), 0xff, size + (size >> 5));
-
- /* Setup registers */
- writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
- writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
- writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
- writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
-
- if (density < 2 && (!CONFIG_FLEXONENAND))
- buffer_size = 0x0400; /* 1KiB page */
- else
- buffer_size = 0x0800; /* 2KiB page */
- writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE);
-
- return 0;
-}
-
-/**
- * flash_exit - Clean up OneNAND simulator
- * @flash: OneNAND simulator data structures
- *
- * Clean up OneNAND simulator.
- */
-static void flash_exit(struct onenand_flash *flash)
-{
- vfree(ONENAND_CORE(flash));
- kfree(flash->base);
-}
-
-static int __init onenand_sim_init(void)
-{
- /* Allocate all 0xff chars pointer */
- ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL);
- if (!ffchars) {
- printk(KERN_ERR "Unable to allocate ff chars.\n");
- return -ENOMEM;
- }
- memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE);
-
- /* Allocate OneNAND simulator mtd pointer */
- info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "Unable to allocate core structures.\n");
- kfree(ffchars);
- return -ENOMEM;
- }
-
- /* Override write_word function */
- info->onenand.write_word = onenand_writew;
-
- if (flash_init(&info->flash)) {
- printk(KERN_ERR "Unable to allocate flash.\n");
- kfree(ffchars);
- kfree(info);
- return -ENOMEM;
- }
-
- info->parts = os_partitions;
-
- info->onenand.base = info->flash.base;
- info->onenand.priv = &info->flash;
-
- info->mtd.name = "OneNAND simulator";
- info->mtd.priv = &info->onenand;
- info->mtd.owner = THIS_MODULE;
-
- if (onenand_scan(&info->mtd, 1)) {
- flash_exit(&info->flash);
- kfree(ffchars);
- kfree(info);
- return -ENXIO;
- }
-
- mtd_device_register(&info->mtd, info->parts,
- ARRAY_SIZE(os_partitions));
-
- return 0;
-}
-
-static void __exit onenand_sim_exit(void)
-{
- struct onenand_chip *this = info->mtd.priv;
- struct onenand_flash *flash = this->priv;
-
- onenand_release(&info->mtd);
- flash_exit(flash);
- kfree(ffchars);
- kfree(info);
-}
-
-module_init(onenand_sim_init);
-module_exit(onenand_sim_exit);
-
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_DESCRIPTION("The OneNAND flash simulator");
-MODULE_LICENSE("GPL");
}
bfin_mac_hwtstamp_init(ndev);
- if (bfin_phc_init(ndev, &pdev->dev)) {
+ rc = bfin_phc_init(ndev, &pdev->dev);
+ if (rc) {
dev_err(&pdev->dev, "Cannot register PHC device!\n");
goto out_err_phc;
}
req = get_mac_list_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
- wrb, &get_mac_list_cmd);
-
+ OPCODE_COMMON_GET_MAC_LIST,
+ get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
req->perm_override = 1;
mdelay(1);
} else {
be_rx_compl_discard(rxo, rxcp);
- be_cq_notify(adapter, rx_cq->id, true, 1);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
if (rxcp->num_rcvd == 0)
break;
}
q = &rxo->q;
if (q->created) {
be_cmd_rxq_destroy(adapter, q);
- /* After the rxq is invalidated, wait for a grace time
- * of 1ms for all dma to end and the flush compl to
- * arrive
- */
- mdelay(1);
be_rx_cq_clean(rxo);
}
be_queue_free(adapter, q);
* all tx skbs are freed.
*/
be_tx_compl_clean(adapter);
+ netif_tx_disable(netdev);
be_rx_qs_destroy(adapter);
if (!status)
be_link_status_update(adapter, link_status);
+ netif_tx_start_all_queues(netdev);
be_roce_dev_open(adapter);
return 0;
err:
goto done;
}
+ pci_disable_sriov(adapter->pdev);
+
for_all_vfs(adapter, vf_cfg, vf) {
if (lancer_chip(adapter))
be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
- pci_disable_sriov(adapter->pdev);
done:
kfree(adapter->vf_cfg);
adapter->num_vfs = 0;
dev_info(dev, "Device supports %d VFs and not %d\n",
adapter->dev_num_vfs, num_vfs);
adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
-
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- if (status) {
- dev_err(dev, "SRIOV enable failed\n");
- adapter->num_vfs = 0;
+ if (!adapter->num_vfs)
return 0;
- }
}
status = be_vf_setup_init(adapter);
be_cmd_enable_vf(adapter, vf + 1);
}
+
+ if (!old_vfs) {
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status) {
+ dev_err(dev, "SRIOV enable failed\n");
+ adapter->num_vfs = 0;
+ goto err;
+ }
+ }
return 0;
err:
dev_err(dev, "VF setup failed\n");
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter) && num_vfs) {
+ if (be_physfn(adapter)) {
if (adapter->dev_num_vfs)
be_vf_setup(adapter);
else
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+struct fec_enet_delayed_work {
+ struct delayed_work delay_work;
+ bool timeout;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
- /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
- spinlock_t hw_lock;
-
struct platform_device *pdev;
int opened;
int hwts_rx_en;
int hwts_tx_en;
struct timer_list time_keep;
-
+ struct fec_enet_delayed_work delay_work;
};
void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ napi_disable(&fep->napi);
+ netif_stop_queue(ndev);
+ netif_tx_lock(ndev);
+ }
+
/* Whack a reset. We should wait for this. */
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ napi_enable(&fep->napi);
+ netif_wake_queue(ndev);
+ netif_tx_unlock(ndev);
+ }
}
static void
ndev->stats.tx_errors++;
- fec_restart(ndev, fep->full_duplex);
- netif_wake_queue(ndev);
+ fep->delay_work.timeout = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work), 0);
+}
+
+static void fec_enet_work(struct work_struct *work)
+{
+ struct fec_enet_private *fep =
+ container_of(work,
+ struct fec_enet_private,
+ delay_work.delay_work.work);
+
+ if (fep->delay_work.timeout) {
+ fep->delay_work.timeout = false;
+ fec_restart(fep->netdev, fep->full_duplex);
+ netif_wake_queue(fep->netdev);
+ }
}
static void
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = fep->phy_dev;
- unsigned long flags;
-
int status_change = 0;
- spin_lock_irqsave(&fep->hw_lock, flags);
-
/* Prevent a state halted on mii error */
if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
phy_dev->state = PHY_RESUMING;
- goto spin_unlock;
+ return;
}
if (phy_dev->link) {
}
}
-spin_unlock:
- spin_unlock_irqrestore(&fep->hw_lock, flags);
-
if (status_change)
phy_print_status(phy_dev);
}
return -ENOMEM;
memset(cbd_base, 0, PAGE_SIZE);
- spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+ INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
return 0;
failed_register:
struct fec_enet_private *fep = netdev_priv(ndev);
int i;
+ cancel_delayed_work_sync(&(fep->delay_work.delay_work));
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
del_timer_sync(&fep->time_keep);
ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
&efx->pci_dev->dev);
- if (!ptp->phc_clock)
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
goto fail3;
+ }
INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
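The sfc fix above reflects that ptp_clock_register() reports failure through an ERR_PTR-encoded pointer rather than NULL, so only IS_ERR()/PTR_ERR() catch and propagate the error. A self-contained sketch of that convention (the example_* names are hypothetical):

    #include <linux/err.h>

    static void *example_register(int fail)
    {
            if (fail)
                    return ERR_PTR(-ENODEV);   /* errno encoded in the pointer */
            return (void *)0x1000;             /* stand-in for a real object */
    }

    static int example_caller(void)
    {
            void *obj = example_register(1);

            if (IS_ERR(obj))
                    return PTR_ERR(obj);       /* -ENODEV; a NULL test would miss it */
            return 0;
    }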
if (info->has_iqueue) {
gxio_mpipe_request_notif_ring_interrupt(
&context, cpu_x(cpu), cpu_y(cpu),
- 1, ingress_irq, info->iqueue.ring);
+ KERNEL_PL, ingress_irq, info->iqueue.ring);
}
}
goto alloc_rx_failed;
/* Allocate rx skbs */
- if (spider_net_alloc_rx_skbs(card))
+ result = spider_net_alloc_rx_skbs(card);
+ if (result)
goto alloc_skbs_failed;
spider_net_set_multi(netdev);
{
struct bfin_sir_self *self = netdev_priv(dev);
struct bfin_sir_port *port = self->sir_port;
- int err = -ENOMEM;
+ int err;
self->newspeed = 0;
self->speed = 9600;
bfin_sir_set_speed(port, 9600);
self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
- if (!self->irlap)
+ if (!self->irlap) {
+ err = -ENOMEM;
goto err_irlap;
+ }
INIT_WORK(&self->work, bfin_sir_send_work);
config MDIO_GPIO
tristate "Support for GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG && GENERIC_GPIO
+ depends on MDIO_BITBANG && GPIOLIB
---help---
Supports GPIO lib-based MDIO busses.
.driver_info = 0,
},
+/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Dell Wireless 5804 (Novatel E371) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* ADU960S */
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a,
USB_CLASS_COMM,
dev_dbg(&dev->udev->dev, "%s", __func__);
+ /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the
+ * firmware restart itself. After restarting, the modem will respond
+ * with the SIERRA_NET_HIP_RESTART_ID indication. The driver continues
+ * sending MSYNC commands every few seconds until it receives the
+ * RESTART event from the firmware
+ */
+
/* tell modem we are ready */
status = sierra_net_send_sync(dev);
if (status < 0)
/* set context index initially to 0 - prepares tx hdr template */
sierra_net_set_ctx_index(priv, 0);
+ /* prepare sync message template */
+ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
/* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE;
if (dev->udev->speed != USB_SPEED_HIGH)
kfree(priv);
return -ENODEV;
}
- /* prepare sync message from template */
- memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
-
- /* initiate the sync sequence */
- sierra_net_dosync(dev);
return 0;
}
netdev_err(dev->net,
"usb_control_msg failed, status %d\n", status);
- sierra_net_set_private(dev, NULL);
+ usbnet_status_stop(dev);
+ sierra_net_set_private(dev, NULL);
kfree(priv);
}
.tx_fixup = sierra_net_tx_fixup,
};
+static int
+sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+ int ret;
+
+ ret = usbnet_probe(udev, prod);
+ if (ret == 0) {
+ struct usbnet *dev = usb_get_intfdata(udev);
+
+ ret = usbnet_status_start(dev, GFP_KERNEL);
+ if (ret == 0) {
+ /* Interrupt URB now set up; initiate sync sequence */
+ sierra_net_dosync(dev);
+ }
+ }
+ return ret;
+}
+
#define DIRECT_IP_DEVICE(vend, prod) \
{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \
.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
static struct usb_driver sierra_net_driver = {
.name = "sierra_net",
.id_table = products,
- .probe = usbnet_probe,
+ .probe = sierra_net_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
return 0;
}
+/* Submit the interrupt URB if not previously submitted, increasing refcount */
+int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
+{
+ int ret = 0;
+
+ WARN_ON_ONCE(dev->interrupt == NULL);
+ if (dev->interrupt) {
+ mutex_lock(&dev->interrupt_mutex);
+
+ if (++dev->interrupt_count == 1)
+ ret = usb_submit_urb(dev->interrupt, mem_flags);
+
+ dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
+ dev->interrupt_count);
+ mutex_unlock(&dev->interrupt_mutex);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_status_start);
+
+/* For resume; submit interrupt URB if previously submitted */
+static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->interrupt_mutex);
+ if (dev->interrupt_count) {
+ ret = usb_submit_urb(dev->interrupt, mem_flags);
+ dev_dbg(&dev->udev->dev,
+ "submitted interrupt URB for resume\n");
+ }
+ mutex_unlock(&dev->interrupt_mutex);
+ return ret;
+}
+
+/* Kill the interrupt URB if all submitters want it killed */
+void usbnet_status_stop(struct usbnet *dev)
+{
+ if (dev->interrupt) {
+ mutex_lock(&dev->interrupt_mutex);
+ WARN_ON(dev->interrupt_count == 0);
+
+ if (dev->interrupt_count && --dev->interrupt_count == 0)
+ usb_kill_urb(dev->interrupt);
+
+ dev_dbg(&dev->udev->dev,
+ "decremented interrupt URB count to %d\n",
+ dev->interrupt_count);
+ mutex_unlock(&dev->interrupt_mutex);
+ }
+}
+EXPORT_SYMBOL_GPL(usbnet_status_stop);
+
+/* For suspend; always kill interrupt URB */
+static void __usbnet_status_stop_force(struct usbnet *dev)
+{
+ if (dev->interrupt) {
+ mutex_lock(&dev->interrupt_mutex);
+ usb_kill_urb(dev->interrupt);
+ dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
+ mutex_unlock(&dev->interrupt_mutex);
+ }
+}
+
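The new usbnet_status_start()/usbnet_status_stop() helpers above refcount submissions of the shared status/interrupt URB under interrupt_mutex, so a minidriver (sierra_net above is the first user) can keep the status endpoint alive outside the usual open/close window without racing the core. A hedged usage sketch with a hypothetical function name:

    static int example_use_status_urb(struct usbnet *dev)
    {
            int ret;

            /* take a reference; the URB is submitted on the 0 -> 1 transition */
            ret = usbnet_status_start(dev, GFP_KERNEL);
            if (ret)
                    return ret;

            /* ... exchange messages carried on the status endpoint ... */

            /* drop the reference; the URB is killed on the 1 -> 0 transition */
            usbnet_status_stop(dev);
            return 0;
    }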
/* Passes this packet up the stack, updating its accounting.
* Some link protocols batch packets, so their rx_fixup paths
* can return clones as well as just modify the original skb.
if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
usbnet_terminate_urbs(dev);
- usb_kill_urb(dev->interrupt);
+ usbnet_status_stop(dev);
usbnet_purge_paused_rxq(dev);
/* start any status interrupt transfer */
if (dev->interrupt) {
- retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
+ retval = usbnet_status_start(dev, GFP_KERNEL);
if (retval < 0) {
netif_err(dev, ifup, dev->net,
"intr submit %d\n", retval);
dev->delay.data = (unsigned long) dev;
init_timer (&dev->delay);
mutex_init (&dev->phy_mutex);
+ mutex_init(&dev->interrupt_mutex);
+ dev->interrupt_count = 0;
dev->net = net;
strcpy (net->name, "usb%d");
*/
netif_device_detach (dev->net);
usbnet_terminate_urbs(dev);
- usb_kill_urb(dev->interrupt);
+ __usbnet_status_stop_force(dev);
/*
* reattach so runtime management can use and
int retval;
if (!--dev->suspend_count) {
- /* resume interrupt URBs */
- if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
- usb_submit_urb(dev->interrupt, GFP_NOIO);
+ /* resume interrupt URB if it was previously submitted */
+ __usbnet_status_start_force(dev, GFP_NOIO);
spin_lock_irq(&dev->txq.lock);
while ((res = usb_get_from_anchor(&dev->deferred))) {
phy = get_phy_device(mdio, addr, is_c45);
if (!phy || IS_ERR(phy)) {
- phy = phy_device_create(mdio, addr, 0, false, NULL);
- if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev,
- "error creating PHY at address %i\n",
- addr);
- continue;
- }
+ dev_err(&mdio->dev,
+ "cannot get PHY at address %i\n",
+ addr);
+ continue;
}
/* Associate the OF node with the device structure so it
* Can not put in pci_device_add yet because resources
* are not assigned yet for some devices.
*/
+ pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
dev->match_driver = true;
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = dev->msi_cap;
- entry->mask_pos = dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT) ?
- PCI_MSI_MASK_64 : PCI_MSI_MASK_32;
+ if (control & PCI_MSI_FLAGS_64BIT)
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ else
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
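The msi.c hunk above fixes a classic C operator-precedence bug: '+' binds more tightly than '?:', so the old expression parsed as "(dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT)) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32", and mask_pos always ended up as the bare mask-register constant, never offset by msi_cap. A tiny standalone illustration (userspace C; the numbers are illustrative only):

    #include <stdio.h>

    int main(void)
    {
            int cap = 0x50, is_64bit = 0;

            /* parses as (cap + is_64bit) ? 0x10 : 0x0c  =>  0x10 */
            int buggy   = cap + is_64bit ? 0x10 : 0x0c;
            /* intended: cap + (is_64bit ? 0x10 : 0x0c)  =>  0x5c */
            int correct = cap + (is_64bit ? 0x10 : 0x0c);

            printf("buggy=%#x correct=%#x\n", buggy, correct);
            return 0;
    }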
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
- pci_fixup_device(pci_fixup_final, dev);
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
config PINCTRL_SH_PFC
# XXX move off the gpio dependency
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
select PINMUX
select PINCONF
config PINCTRL_PFC_SH7203
def_bool y
depends on CPU_SUBTYPE_SH7203
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7264
def_bool y
depends on CPU_SUBTYPE_SH7264
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7269
def_bool y
depends on CPU_SUBTYPE_SH7269
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7372
config PINCTRL_PFC_SH7720
def_bool y
depends on CPU_SUBTYPE_SH7720
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7722
def_bool y
depends on CPU_SUBTYPE_SH7722
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7723
def_bool y
depends on CPU_SUBTYPE_SH7723
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7724
def_bool y
depends on CPU_SUBTYPE_SH7724
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7734
def_bool y
depends on CPU_SUBTYPE_SH7734
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7757
def_bool y
depends on CPU_SUBTYPE_SH7757
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7785
def_bool y
depends on CPU_SUBTYPE_SH7785
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SH7786
def_bool y
depends on CPU_SUBTYPE_SH7786
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
config PINCTRL_PFC_SHX3
def_bool y
depends on CPU_SUBTYPE_SHX3
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select PINCTRL_SH_PFC
endif
config REGULATOR_GPIO
tristate "GPIO regulator support"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
This driver provides support for regulators that can be
controlled via gpios.
*/
static void __exit tile_rtc_driver_exit(void)
{
+ platform_device_unregister(tile_rtc_platform_device);
platform_driver_unregister(&tile_rtc_platform_driver);
}
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
- depends on ATH79 && GENERIC_GPIO
+ depends on ATH79 && GPIOLIB
select SPI_BITBANG
help
This enables support for the SPI controller present on the
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select SPI_BITBANG
help
This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select SPI_BITBANG
help
This is the driver for OpenCores tiny SPI master controller.
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
- depends on GENERIC_GPIO && CPU_TX49XX
+ depends on GPIOLIB && CPU_TX49XX
help
SPI driver for Toshiba TXx9 MIPS SoCs
#include "ssb_private.h"
-static const char *part_probes[] = { "bcm47xxpart", NULL };
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data ssb_pflash_data = {
.part_probe_types = part_probes,
config ANDROID_TIMED_GPIO
tristate "Android timed gpio driver"
- depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+ depends on GPIOLIB && ANDROID_TIMED_OUTPUT
default n
config ANDROID_LOW_MEMORY_KILLER
depends on SPI
select IIO_TRIGGER if IIO_BUFFER
depends on !IIO_BUFFER || IIO_KFIFO_BUF
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build SPI support for the ST microelectronics
accelerometer. The driver supplies direct access via sysfs files
config AD7816
tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
depends on SPI
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices AD7816/7/8
temperature sensors and ADC.
config ADT7316
tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
config AD2S1200
tristate "Analog Devices ad2s1200/ad2s1205 driver"
depends on SPI
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1200 and ad2s1205, provides direct access
config AD2S1210
tristate "Analog Devices ad2s1210 driver"
depends on SPI
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1210, provides direct access via sysfs.
config IIO_GPIO_TRIGGER
tristate "GPIO trigger"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Provides support for using GPIO pins as IIO triggers.
Enable this to let the user space manage the platform thermals.
config CPU_THERMAL
- tristate "generic cpu cooling support"
+ bool "generic cpu cooling support"
depends on CPU_FREQ
select CPU_FREQ_TABLE
help
This implements the generic cpu cooling mechanism through frequency
- reduction, cpu hotplug and any other ways of reducing temperature. An
- ACPI version of this already exists(drivers/acpi/processor_thermal.c).
+ reduction. An ACPI version of this already exists
+ (drivers/acpi/processor_thermal.c).
This will be useful for platforms using the generic thermal interface
and not the ACPI interface.
+
If you want this support, you should say Y here.
config THERMAL_EMULATION
user can manually input temperature and test the different trip
threshold behaviour for simulation purpose.
+ WARNING: Be careful while enabling this option on production systems,
+ because userland can easily disable the thermal policy by simply
+ flooding this sysfs node with low temperature values.
+
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on PLAT_SPEAR
If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS series of SoC.
-config EXYNOS_THERMAL_EMUL
- bool "EXYNOS TMU emulation mode support"
- depends on EXYNOS_THERMAL
- help
- Exynos 4412 and 4414 and 5 series has emulation mode on TMU.
- Enable this option will be make sysfs node in exynos thermal platform
- device directory to support emulation mode. With emulation mode sysfs
- node, you can manually input temperature to TMU for simulation purpose.
-
config DOVE_THERMAL
tristate "Temperature sensor on Marvell Dove SoCs"
depends on ARCH_DOVE
created. Cooling devices can be bound to the trip points to cool this
thermal zone if trip points reached.
+config ARMADA_THERMAL
+ tristate "Armada 370/XP thermal management"
+ depends on ARCH_MVEBU
+ depends on OF
+ help
+ Enable this option if you want to have support for the thermal management
+ controller present in the Armada 370 and Armada XP SoCs.
+
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
depends on ARCH_U8500
#
obj-$(CONFIG_THERMAL) += thermal_sys.o
+thermal_sys-y += thermal_core.o
# governors
-obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
-obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
-obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
+thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
+thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
-obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
+thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
+obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
--- /dev/null
+/*
+ * Marvell Armada 370/XP thermal sensor driver
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+
+#define THERMAL_VALID_OFFSET 9
+#define THERMAL_VALID_MASK 0x1
+#define THERMAL_TEMP_OFFSET 10
+#define THERMAL_TEMP_MASK 0x1ff
+
+/* Thermal Manager Control and Status Register */
+#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
+#define PMU_TM_DISABLE_OFFS 0
+#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS 11
+#define PMU_TDC0_REF_CAL_CNT_MASK (0x1ff << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30)
+#define PMU_TDC0_START_CAL_MASK (0x1 << 25)
+
+struct armada_thermal_ops;
+
+/* Marvell EBU Thermal Sensor Dev Structure */
+struct armada_thermal_priv {
+ void __iomem *sensor;
+ void __iomem *control;
+ struct armada_thermal_ops *ops;
+};
+
+struct armada_thermal_ops {
+ /* Initialize the sensor */
+ void (*init_sensor)(struct armada_thermal_priv *);
+
+ /* Test for a valid sensor value (optional) */
+ bool (*is_valid)(struct armada_thermal_priv *);
+};
+
+static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
+{
+ unsigned long reg;
+
+ reg = readl_relaxed(priv->control);
+ reg |= PMU_TDC0_OTF_CAL_MASK;
+ writel(reg, priv->control);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+ writel(reg, priv->control);
+
+ /* Reset the sensor */
+ reg = readl_relaxed(priv->control);
+ writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+
+ writel(reg, priv->control);
+
+ /* Enable the sensor */
+ reg = readl_relaxed(priv->sensor);
+ reg &= ~PMU_TM_DISABLE_MASK;
+ writel(reg, priv->sensor);
+}
+
+static void armada370_init_sensor(struct armada_thermal_priv *priv)
+{
+ unsigned long reg;
+
+ reg = readl_relaxed(priv->control);
+ reg |= PMU_TDC0_OTF_CAL_MASK;
+ writel(reg, priv->control);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+ writel(reg, priv->control);
+
+ reg &= ~PMU_TDC0_START_CAL_MASK;
+ writel(reg, priv->control);
+
+ mdelay(10);
+}
+
+static bool armada_is_valid(struct armada_thermal_priv *priv)
+{
+ unsigned long reg = readl_relaxed(priv->sensor);
+
+ return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK;
+}
+
+static int armada_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct armada_thermal_priv *priv = thermal->devdata;
+ unsigned long reg;
+
+ /* Valid check */
+ if (priv->ops->is_valid && !priv->ops->is_valid(priv)) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ reg = readl_relaxed(priv->sensor);
+ reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK;
+ *temp = (3153000000UL - (10000000UL*reg)) / 13825;
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = armada_get_temp,
+};
+
+static const struct armada_thermal_ops armadaxp_ops = {
+ .init_sensor = armadaxp_init_sensor,
+};
+
+static const struct armada_thermal_ops armada370_ops = {
+ .is_valid = armada_is_valid,
+ .init_sensor = armada370_init_sensor,
+};
+
+static const struct of_device_id armada_thermal_id_table[] = {
+ {
+ .compatible = "marvell,armadaxp-thermal",
+ .data = &armadaxp_ops,
+ },
+ {
+ .compatible = "marvell,armada370-thermal",
+ .data = &armada370_ops,
+ },
+ {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, armada_thermal_id_table);
+
+static int armada_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ const struct of_device_id *match;
+ struct armada_thermal_priv *priv;
+ struct resource *res;
+
+ match = of_match_device(armada_thermal_id_table, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->sensor))
+ return PTR_ERR(priv->sensor);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv->control = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->control))
+ return PTR_ERR(priv->control);
+
+ priv->ops = (struct armada_thermal_ops *)match->data;
+ priv->ops->init_sensor(priv);
+
+ thermal = thermal_zone_device_register("armada_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int armada_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *armada_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(armada_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver armada_thermal_driver = {
+ .probe = armada_thermal_probe,
+ .remove = armada_thermal_exit,
+ .driver = {
+ .name = "armada_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(armada_thermal_id_table),
+ },
+};
+
+module_platform_driver(armada_thermal_driver);
+
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 370/XP thermal driver");
+MODULE_LICENSE("GPL v2");
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/thermal.h>
-#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/cpu_cooling.h>
/**
- * struct cpufreq_cooling_device
+ * struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
* registered.
- * @cool_dev: thermal_cooling_device pointer to keep track of the the
- * egistered cooling device.
+ * @cool_dev: thermal_cooling_device pointer to keep track of the
+ * registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* frequency.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
- * @node: list_head to link all cpufreq_cooling_device together.
*
* This structure is required for keeping information of each
- * cpufreq_cooling_device registered as a list whose head is represented by
- * cooling_cpufreq_list. In order to prevent corruption of this list a
+ * cpufreq_cooling_device registered. In order to prevent corruption of this, a
* mutex lock cooling_cpufreq_lock is used.
*/
struct cpufreq_cooling_device {
unsigned int cpufreq_state;
unsigned int cpufreq_val;
struct cpumask allowed_cpus;
- struct list_head node;
};
-static LIST_HEAD(cooling_cpufreq_list);
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
* get_idr - function to get a unique id.
* @idr: struct idr * handle used to create a id.
* @id: int * value generated by this function.
+ *
+ * This function will populate @id with a unique
+ * id, using the idr API.
+ *
+ * Return: 0 on success, an error code on failure.
*/
static int get_idr(struct idr *idr, int *id)
{
if (unlikely(ret < 0))
return ret;
*id = ret;
+
return 0;
}
/* Below code defines functions to be used for cpufreq as cooling device */
/**
- * is_cpufreq_valid - function to check if a cpu has frequency transition policy.
+ * is_cpufreq_valid - function to check frequency transitioning capability.
* @cpu: cpu for which check is needed.
+ *
+ * This function checks whether the system is currently capable of
+ * changing the frequency of the given @cpu.
+ *
+ * Return: 0 if the system is not currently capable of changing
+ * the frequency of the given cpu, !0 if the frequency is changeable.
*/
static int is_cpufreq_valid(int cpu)
{
struct cpufreq_policy policy;
+
return !cpufreq_get_policy(&policy, cpu);
}
+enum cpufreq_cooling_property {
+ GET_LEVEL,
+ GET_FREQ,
+ GET_MAXL,
+};
+
/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: level of frequency, equals cooling state of cpu cooling device
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
+ * get_property - fetch a property of interest for a given cpu.
+ * @cpu: cpu for which the property is required
+ * @input: query parameter
+ * @output: query return
+ * @property: type of query (frequency, level, max level)
+ *
+ * This is the common function to
+ * 1. get maximum cpu cooling states
+ * 2. translate frequency to cooling state
+ * 3. translate cooling state to frequency
+ * Note that the code may not be in good shape,
+ * but it is written this way in order to:
+ * a) reduce duplicate code as most of the code can be shared.
+ * b) make sure the logic is consistent when translating between
+ * cooling states and frequencies.
+ *
+ * Return: 0 on success, -EINVAL when invalid parameters are passed.
*/
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
+static int get_property(unsigned int cpu, unsigned long input,
+ unsigned int *output,
+ enum cpufreq_cooling_property property)
{
- int ret = 0, i = 0;
- unsigned long level_index;
- bool descend = false;
+ int i, j;
+ unsigned long max_level = 0, level = 0;
+ unsigned int freq = CPUFREQ_ENTRY_INVALID;
+ int descend = -1;
struct cpufreq_frequency_table *table =
cpufreq_frequency_get_table(cpu);
+
+ if (!output)
+ return -EINVAL;
+
if (!table)
- return ret;
+ return -EINVAL;
- while (table[i].frequency != CPUFREQ_TABLE_END) {
+ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ /* ignore invalid entries */
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
- /*check if table in ascending or descending order*/
- if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
- (table[i + 1].frequency < table[i].frequency)
- && !descend) {
- descend = true;
- }
+ /* ignore duplicate entry */
+ if (freq == table[i].frequency)
+ continue;
+
+ /* get the frequency order */
+ if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
+ descend = !!(freq > table[i].frequency);
- /*return if level matched and table in descending order*/
- if (descend && i == level)
- return table[i].frequency;
- i++;
+ freq = table[i].frequency;
+ max_level++;
}
- i--;
- if (level > i || descend)
- return ret;
- level_index = i - level;
+ /* get max level */
+ if (property == GET_MAXL) {
+ *output = (unsigned int)max_level;
+ return 0;
+ }
- /*Scan the table in reverse order and match the level*/
- while (i >= 0) {
+ if (property == GET_FREQ)
+ level = descend ? input : (max_level - input - 1);
+
+ for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ /* ignore invalid entry */
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
continue;
- /*return if level matched*/
- if (i == level_index)
- return table[i].frequency;
- i--;
+
+ /* ignore duplicate entry */
+ if (freq == table[i].frequency)
+ continue;
+
+ /* now we have a valid frequency entry */
+ freq = table[i].frequency;
+
+ if (property == GET_LEVEL && (unsigned int)input == freq) {
+ /* get level by frequency */
+ *output = descend ? j : (max_level - j - 1);
+ return 0;
+ }
+ if (property == GET_FREQ && level == j) {
+ /* get frequency by level */
+ *output = freq;
+ return 0;
+ }
+ j++;
}
- return ret;
+
+ return -EINVAL;
+}
+
+/**
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
+ * @cpu: cpu for which the level is required
+ * @freq: the frequency of interest
+ *
+ * This function will match the cooling level corresponding to the
+ * requested @freq and return it.
+ *
+ * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
+ * otherwise.
+ */
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+ unsigned int val;
+
+ if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
+ return THERMAL_CSTATE_INVALID;
+
+ return (unsigned long)val;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
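To illustrate the newly exported helper (a hedged sketch, not part of the patch itself): a platform thermal driver can translate a platform-defined clip frequency into a cooling state before binding a cooling device. The freq_clip_khz value and the function name below are assumptions for the example; level 0 corresponds to the highest valid frequency in the cpufreq table.

    #include <linux/cpu_cooling.h>
    #include <linux/printk.h>
    #include <linux/thermal.h>

    /* Illustrative only: map an assumed clip frequency (in kHz) on CPU0
     * to a cooling level, as a platform .bind callback might do.
     */
    static unsigned long example_clip_to_level(unsigned int freq_clip_khz)
    {
            unsigned long level;

            level = cpufreq_cooling_get_level(0, freq_clip_khz);
            if (level == THERMAL_CSTATE_INVALID)
                    pr_warn("no cooling level matches %u kHz\n", freq_clip_khz);

            return level;
    }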
+
+/**
+ * get_cpu_frequency - get the absolute value of frequency from level.
+ * @cpu: cpu for which frequency is fetched.
+ * @level: cooling level
+ *
+ * This function matches a cooling level with a frequency: the requested
+ * level equals the cooling state of the cpu cooling device, and the
+ * corresponding frequency is returned.
+ * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
+ *
+ * Return: 0 on error, the corresponding frequency otherwise.
+ */
+static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
+{
+ int ret = 0;
+ unsigned int freq;
+
+ ret = get_property(cpu, level, &freq, GET_FREQ);
+ if (ret)
+ return 0;
+
+ return freq;
}
/**
* @cpufreq_device: cpufreq_cooling_device pointer containing frequency
* clipping data.
* @cooling_state: value of the cooling state.
+ *
+ * Function used to make sure the cpufreq layer is aware of current thermal
+ * limits. The limits are applied by updating the cpufreq policy.
+ *
+ * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
+ * cooling state).
*/
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
- unsigned long cooling_state)
+ unsigned long cooling_state)
{
unsigned int cpuid, clip_freq;
- struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
- unsigned int cpu = cpumask_any(maskPtr);
+ struct cpumask *mask = &cpufreq_device->allowed_cpus;
+ unsigned int cpu = cpumask_any(mask);
/* Check if the old cooling action is same as new cooling action */
cpufreq_device->cpufreq_val = clip_freq;
notify_device = cpufreq_device;
- for_each_cpu(cpuid, maskPtr) {
+ for_each_cpu(cpuid, mask) {
if (is_cpufreq_valid(cpuid))
cpufreq_update_policy(cpuid);
}
* @nb: struct notifier_block * with callback info.
* @event: value showing cpufreq event for which this function invoked.
* @data: callback-specific data
+ *
+ * Callback to hijack the notification on cpufreq policy transition.
+ * Every time there is a change in policy, we will intercept and
+ * update the cpufreq policy with thermal constraints.
+ *
+ * Return: 0 (success)
*/
static int cpufreq_thermal_notifier(struct notifier_block *nb,
- unsigned long event, void *data)
+ unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
unsigned long max_freq = 0;
if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
max_freq = notify_device->cpufreq_val;
- /* Never exceed user_policy.max*/
+ /* Never exceed user_policy.max */
if (max_freq > policy->user_policy.max)
max_freq = policy->user_policy.max;
return 0;
}
-/*
- * cpufreq cooling device callback functions are defined below
- */
+/* cpufreq cooling device callback functions are defined below */
/**
* cpufreq_get_max_state - callback function to get the max cooling state.
* @cdev: thermal cooling device pointer.
* @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
+ struct cpumask *mask = &cpufreq_device->allowed_cpus;
unsigned int cpu;
- struct cpufreq_frequency_table *table;
- unsigned long count = 0;
- int i = 0;
-
- cpu = cpumask_any(maskPtr);
- table = cpufreq_frequency_get_table(cpu);
- if (!table) {
- *state = 0;
- return 0;
- }
+ unsigned int count = 0;
+ int ret;
- for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
- if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
- continue;
- count++;
- }
+ cpu = cpumask_any(mask);
- if (count > 0) {
- *state = --count;
- return 0;
- }
+ ret = get_property(cpu, 0, &count, GET_MAXL);
- return -EINVAL;
+ if (count > 0)
+ *state = count;
+
+ return ret;
}
/**
* cpufreq_get_cur_state - callback function to get the current cooling state.
* @cdev: thermal cooling device pointer.
* @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
*state = cpufreq_device->cpufreq_state;
+
return 0;
}
* cpufreq_set_cur_state - callback function to set the current cooling state.
* @cdev: thermal cooling device pointer.
* @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
*/
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
+ * cooling devices.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
*/
-struct thermal_cooling_device *cpufreq_cooling_register(
- const struct cpumask *clip_cpus)
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev = NULL;
int ret = 0, i;
struct cpufreq_policy policy;
- /*Verify that all the clip cpus have same freq_min, freq_max limit*/
+ /* Verify that all the clip cpus have same freq_min, freq_max limit */
for_each_cpu(i, clip_cpus) {
- /*continue if cpufreq policy not found and not return error*/
+ /* continue if cpufreq policy is not found, without returning an error */
if (!cpufreq_get_policy(&policy, i))
continue;
if (min == 0 && max == 0) {
max = policy.cpuinfo.max_freq;
} else {
if (min != policy.cpuinfo.min_freq ||
- max != policy.cpuinfo.max_freq)
+ max != policy.cpuinfo.max_freq)
return ERR_PTR(-EINVAL);
}
}
cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
return ERR_PTR(-EINVAL);
}
- sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id);
+ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+ cpufreq_dev->id);
cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ &cpufreq_cooling_ops);
if (!cool_dev) {
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
/* Register the notifier for first cpufreq cooling device */
if (cpufreq_dev_count == 0)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
+ CPUFREQ_POLICY_NOTIFIER);
cpufreq_dev_count++;
mutex_unlock(&cooling_cpufreq_lock);
+
return cool_dev;
}
-EXPORT_SYMBOL(cpufreq_cooling_register);
+EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
+ *
+ * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
cpufreq_dev_count--;
/* Unregister the notifier for the last cpufreq cooling device */
- if (cpufreq_dev_count == 0) {
+ if (cpufreq_dev_count == 0)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
- }
+ CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
kfree(cpufreq_dev);
}
-EXPORT_SYMBOL(cpufreq_cooling_unregister);
+EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
cpumask_set_cpu(0, &mask_val);
cdev = cpufreq_cooling_register(&mask_val);
- if (IS_ERR_OR_NULL(cdev)) {
+ if (IS_ERR(cdev)) {
dev_err(&pdev->dev, "Failed to register cooling device\n");
return PTR_ERR(cdev);
}
low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
if (low_irq < 0) {
dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed.\n");
- return low_irq;
+ ret = low_irq;
+ goto out_unlock;
}
ret = devm_request_threaded_irq(&pdev->dev, low_irq, NULL,
"dbx500_temp_low", pzone);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to allocate temp low irq.\n");
- return ret;
+ goto out_unlock;
}
high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
if (high_irq < 0) {
dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed.\n");
- return high_irq;
+ ret = high_irq;
+ goto out_unlock;
}
ret = devm_request_threaded_irq(&pdev->dev, high_irq, NULL,
"dbx500_temp_high", pzone);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to allocate temp high irq.\n");
- return ret;
+ goto out_unlock;
}
pzone->therm_dev = thermal_zone_device_register("db8500_thermal_zone",
ptrips->num_trips, 0, pzone, &thdev_ops, NULL, 0, 0);
- if (IS_ERR_OR_NULL(pzone->therm_dev)) {
+ if (IS_ERR(pzone->therm_dev)) {
dev_err(&pdev->dev, "Register thermal zone device failed.\n");
- return PTR_ERR(pzone->therm_dev);
+ ret = PTR_ERR(pzone->therm_dev);
+ goto out_unlock;
}
dev_info(&pdev->dev, "Thermal zone device registered.\n");
platform_set_drvdata(pdev, pzone);
pzone->mode = THERMAL_DEVICE_ENABLED;
+
+out_unlock:
mutex_unlock(&pzone->th_lock);
- return 0;
+ return ret;
}
static int db8500_thermal_remove(struct platform_device *pdev)
}
/*
- * Calculate temperature. See Section 8.10.1 of 88AP510,
- * Documentation/arm/Marvell/README
+ * Calculate temperature. According to Marvell internal
+ * documentation the formula for this is:
+ * Celsius = (322-reg)/1.3625
*/
reg = readl_relaxed(priv->sensor);
reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
- *temp = ((2281638UL - (7298*reg)) / 10);
+ *temp = ((3220000000UL - (10000000UL * reg)) / 13625);
return 0;
}
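As a quick sanity check of the fixed-point conversion above (illustrative only, the raw register value is made up): starting from Celsius = (322 - reg)/1.3625, multiplying the numerator by 10^7 and the 1.3625 divisor by 10^4 gives the integer expression used in the code, with the result in millicelsius.

    #include <stdio.h>

    int main(void)
    {
            unsigned long reg = 200;        /* assumed raw sensor reading */

            /* (322 - reg) / 1.3625 degrees C, computed without floating
             * point; the result is in millicelsius.
             */
            unsigned long temp = (3220000000UL - 10000000UL * reg) / 13625;

            printf("%lu millicelsius\n", temp);     /* 89541, i.e. ~89.5 C */
            return 0;
    }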
#define IDLE_INTERVAL 10000
#define MCELSIUS 1000
-#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#ifdef CONFIG_THERMAL_EMULATION
#define EXYNOS_EMUL_TIME 0x57F0
#define EXYNOS_EMUL_TIME_SHIFT 16
#define EXYNOS_EMUL_DATA_SHIFT 8
#define EXYNOS_EMUL_DATA_MASK 0xFF
#define EXYNOS_EMUL_ENABLE 0x1
-#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+#endif /* CONFIG_THERMAL_EMULATION */
/* CPU Zone information */
#define PANIC_ZONE 4
struct thermal_sensor_conf {
char name[SENSOR_NAME_LEN];
int (*read_temperature)(void *data);
+ int (*write_emul_temp)(void *drv_data, unsigned long temp);
struct thermal_trip_point_conf trip_data;
struct thermal_cooling_conf cooling_data;
void *private_data;
return ret;
}
-static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq)
-{
- int i = 0, ret = -EINVAL;
- struct cpufreq_frequency_table *table = NULL;
-#ifdef CONFIG_CPU_FREQ
- table = cpufreq_frequency_get_table(cpu);
-#endif
- if (!table)
- return ret;
-
- while (table[i].frequency != CPUFREQ_TABLE_END) {
- if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
- continue;
- if (table[i].frequency == freq)
- return i;
- i++;
- }
- return ret;
-}
-
/* Bind callback functions for thermal zone */
static int exynos_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
/* Bind the thermal zone to the cpufreq cooling device */
for (i = 0; i < tab_size; i++) {
clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
- level = exynos_get_frequency_level(0, clip_data->freq_clip_max);
- if (level < 0)
+ level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
+ if (level == THERMAL_CSTATE_INVALID)
return 0;
switch (GET_ZONE(i)) {
case MONITOR_ZONE:
return 0;
}
+/* Set emulation temperature callback function for thermal zone */
+static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
+ unsigned long temp)
+{
+ void *data;
+ int ret = -EINVAL;
+
+ if (!th_zone->sensor_conf) {
+ pr_info("Temperature sensor not initialised\n");
+ return -EINVAL;
+ }
+ data = th_zone->sensor_conf->private_data;
+ if (th_zone->sensor_conf->write_emul_temp)
+ ret = th_zone->sensor_conf->write_emul_temp(data, temp);
+ return ret;
+}
+
/* Get the temperature trend */
static int exynos_get_trend(struct thermal_zone_device *thermal,
int trip, enum thermal_trend *trend)
.bind = exynos_bind,
.unbind = exynos_unbind,
.get_temp = exynos_get_temp,
+ .set_emul_temp = exynos_set_emul_temp,
.get_trend = exynos_get_trend,
.get_mode = exynos_get_mode,
.set_mode = exynos_set_mode,
return temp;
}
+#ifdef CONFIG_THERMAL_EMULATION
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+ struct exynos_tmu_data *data = drv_data;
+ unsigned int reg;
+ int ret = -EINVAL;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ if (temp && temp < MCELSIUS)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+
+ if (temp) {
+ temp /= MCELSIUS;
+
+ reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+ (temp_to_code(data, temp)
+ << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+ } else {
+ reg &= ~EXYNOS_EMUL_ENABLE;
+ }
+
+ writel(reg, data->base + EXYNOS_EMUL_CON);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+ return 0;
+out:
+ return ret;
+}
+#else
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+ { return -EINVAL; }
+#endif /* CONFIG_THERMAL_EMULATION */
+
static void exynos_tmu_work(struct work_struct *work)
{
struct exynos_tmu_data *data = container_of(work,
static struct thermal_sensor_conf exynos_sensor_conf = {
.name = "exynos-therm",
.read_temperature = (int (*)(void *))exynos_tmu_read,
+ .write_emul_temp = exynos_tmu_set_emulation,
};
#if defined(CONFIG_CPU_EXYNOS4210)
.compatible = "samsung,exynos4210-tmu",
.data = (void *)EXYNOS4210_TMU_DRV_DATA,
},
+ {
+ .compatible = "samsung,exynos4412-tmu",
+ .data = (void *)EXYNOS_TMU_DRV_DATA,
+ },
{
.compatible = "samsung,exynos5250-tmu",
.data = (void *)EXYNOS_TMU_DRV_DATA,
platform_get_device_id(pdev)->driver_data;
}
-#ifdef CONFIG_EXYNOS_THERMAL_EMUL
-static ssize_t exynos_tmu_emulation_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- unsigned int reg;
- u8 temp_code;
- int temp = 0;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- goto out;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
- reg = readl(data->base + EXYNOS_EMUL_CON);
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
- if (reg & EXYNOS_EMUL_ENABLE) {
- reg >>= EXYNOS_EMUL_DATA_SHIFT;
- temp_code = reg & EXYNOS_EMUL_DATA_MASK;
- temp = code_to_temp(data, temp_code);
- }
-out:
- return sprintf(buf, "%d\n", temp * MCELSIUS);
-}
-
-static ssize_t exynos_tmu_emulation_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct platform_device *pdev = container_of(dev,
- struct platform_device, dev);
- struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- unsigned int reg;
- int temp;
-
- if (data->soc == SOC_ARCH_EXYNOS4210)
- goto out;
-
- if (!sscanf(buf, "%d\n", &temp) || temp < 0)
- return -EINVAL;
-
- mutex_lock(&data->lock);
- clk_enable(data->clk);
-
- reg = readl(data->base + EXYNOS_EMUL_CON);
-
- if (temp) {
- /* Both CELSIUS and MCELSIUS type are available for input */
- if (temp > MCELSIUS)
- temp /= MCELSIUS;
-
- reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
- (temp_to_code(data, (temp / MCELSIUS))
- << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
- } else {
- reg &= ~EXYNOS_EMUL_ENABLE;
- }
-
- writel(reg, data->base + EXYNOS_EMUL_CON);
-
- clk_disable(data->clk);
- mutex_unlock(&data->lock);
-
-out:
- return count;
-}
-
-static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
- exynos_tmu_emulation_store);
-static int create_emulation_sysfs(struct device *dev)
-{
- return device_create_file(dev, &dev_attr_emulation);
-}
-static void remove_emulation_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_emulation);
-}
-#else
-static inline int create_emulation_sysfs(struct device *dev) { return 0; }
-static inline void remove_emulation_sysfs(struct device *dev) {}
-#endif
-
static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
return ret;
}
- data->clk = clk_get(NULL, "tmu_apbif");
+ data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
if (IS_ERR(data->clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return PTR_ERR(data->clk);
}
+ ret = clk_prepare(data->clk);
+ if (ret)
+ return ret;
+
if (pdata->type == SOC_ARCH_EXYNOS ||
pdata->type == SOC_ARCH_EXYNOS4210)
data->soc = pdata->type;
goto err_clk;
}
- ret = create_emulation_sysfs(&pdev->dev);
- if (ret)
- dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
-
return 0;
err_clk:
platform_set_drvdata(pdev, NULL);
- clk_put(data->clk);
+ clk_unprepare(data->clk);
return ret;
}
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- remove_emulation_sysfs(&pdev->dev);
-
exynos_tmu_control(pdev, false);
exynos_unregister_thermal();
- clk_put(data->clk);
+ clk_unprepare(data->clk);
platform_set_drvdata(pdev, NULL);
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
#include <linux/thermal.h>
#include "thermal_core.h"
static struct thermal_governor thermal_gov_fair_share = {
.name = "fair_share",
.throttle = fair_share_throttle,
- .owner = THIS_MODULE,
};
-static int __init thermal_gov_fair_share_init(void)
+int thermal_gov_fair_share_register(void)
{
return thermal_register_governor(&thermal_gov_fair_share);
}
-static void __exit thermal_gov_fair_share_exit(void)
+void thermal_gov_fair_share_unregister(void)
{
thermal_unregister_governor(&thermal_gov_fair_share);
}
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_fair_share_init);
-module_exit(thermal_gov_fair_share_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A simple weight based thermal throttling governor");
-MODULE_LICENSE("GPL");
reg = readl_relaxed(priv->sensor);
/* Valid check */
- if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
- KIRKWOOD_THERMAL_VALID_MASK) {
+ if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+ KIRKWOOD_THERMAL_VALID_MASK)) {
dev_err(&thermal->device,
"Temperature sensor reading not valid\n");
return -EIO;
}
/*
- * Calculate temperature. See Section 8.10.1 of the 88AP510,
- * datasheet, which has the same sensor.
- * Documentation/arm/Marvell/README
+ * Calculate temperature. According to Marvell internal
+ * documentation the formula for this is:
+ * Celsius = (322-reg)/1.3625
*/
reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
KIRKWOOD_THERMAL_TEMP_MASK;
- *temp = ((2281638UL - (7298*reg)) / 10);
+ *temp = ((3220000000UL - (10000000UL * reg)) / 13625);
return 0;
}
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
spin_lock_init(&common->lock);
common->dev = dev;
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq) {
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev, "Could not allocate priv\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_unregister;
}
priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_unregister;
+ }
priv->common = common;
priv->id = i;
goto error_unregister;
}
- list_move_tail(&priv->list, &common->head);
-
if (rcar_has_irq_support(priv))
rcar_thermal_irq_enable(priv);
+
+ list_move_tail(&priv->list, &common->head);
}
platform_set_drvdata(pdev, common);
return 0;
error_unregister:
- rcar_thermal_for_each_priv(priv, common)
+ rcar_thermal_for_each_priv(priv, common) {
thermal_zone_device_unregister(priv->zone);
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_disable(priv);
+ }
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
static int rcar_thermal_remove(struct platform_device *pdev)
{
struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
struct rcar_thermal_priv *priv;
- rcar_thermal_for_each_priv(priv, common)
+ rcar_thermal_for_each_priv(priv, common) {
thermal_zone_device_unregister(priv->zone);
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_disable(priv);
+ }
platform_set_drvdata(pdev, NULL);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
return 0;
}
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
#include <linux/thermal.h>
#include "thermal_core.h"
switch (trend) {
case THERMAL_TREND_RAISING:
- if (throttle)
+ if (throttle) {
cur_state = cur_state < instance->upper ?
(cur_state + 1) : instance->upper;
+ if (cur_state < instance->lower)
+ cur_state = instance->lower;
+ }
break;
case THERMAL_TREND_RAISE_FULL:
if (throttle)
if (cur_state == instance->lower) {
if (!throttle)
cur_state = -1;
- } else
+ } else {
cur_state -= 1;
+ if (cur_state > instance->upper)
+ cur_state = instance->upper;
+ }
break;
case THERMAL_TREND_DROP_FULL:
if (cur_state == instance->lower) {
static struct thermal_governor thermal_gov_step_wise = {
.name = "step_wise",
.throttle = step_wise_throttle,
- .owner = THIS_MODULE,
};
-static int __init thermal_gov_step_wise_init(void)
+int thermal_gov_step_wise_register(void)
{
return thermal_register_governor(&thermal_gov_step_wise);
}
-static void __exit thermal_gov_step_wise_exit(void)
+void thermal_gov_step_wise_unregister(void)
{
thermal_unregister_governor(&thermal_gov_step_wise);
}
-
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_step_wise_init);
-module_exit(thermal_gov_step_wise_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A step-by-step thermal throttling governor");
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhang Rui");
MODULE_DESCRIPTION("Generic thermal management sysfs support");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
static DEFINE_IDR(thermal_tz_idr);
static DEFINE_IDR(thermal_cdev_idr);
return err;
}
-EXPORT_SYMBOL_GPL(thermal_register_governor);
void thermal_unregister_governor(struct thermal_governor *governor)
{
mutex_unlock(&thermal_governor_lock);
return;
}
-EXPORT_SYMBOL_GPL(thermal_unregister_governor);
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
monitor_thermal_zone(tz);
}
-static int thermal_zone_get_temp(struct thermal_zone_device *tz,
- unsigned long *temp)
+/**
+ * thermal_zone_get_temp() - returns the temperature of a thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @temp: a valid pointer to where to store the resulting temperature.
+ *
+ * When a valid thermal zone reference is passed, it will fetch its
+ * temperature and fill @temp.
+ *
+ * Return: On success returns 0, an error code otherwise
+ */
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
- int ret = 0;
+ int ret = -EINVAL;
#ifdef CONFIG_THERMAL_EMULATION
int count;
unsigned long crit_temp = -1UL;
enum thermal_trip_type type;
#endif
+ if (!tz || IS_ERR(tz))
+ goto exit;
+
mutex_lock(&tz->lock);
ret = tz->ops->get_temp(tz, temp);
skip_emul:
#endif
mutex_unlock(&tz->lock);
+exit:
return ret;
}
+EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
static void update_temperature(struct thermal_zone_device *tz)
{
for (count = 0; count < tz->trips; count++)
handle_thermal_trip(tz, count);
}
-EXPORT_SYMBOL(thermal_zone_device_update);
+EXPORT_SYMBOL_GPL(thermal_zone_device_update);
static void thermal_zone_device_check(struct work_struct *work)
{
#endif
/**
- * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
- * @tz: thermal zone device
+ * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
+ * @tz: pointer to struct thermal_zone_device
* @trip: indicates which trip point the cooling devices is
* associated with in this thermal zone.
- * @cdev: thermal cooling device
+ * @cdev: pointer to struct thermal_cooling_device
+ * @upper: the maximum cooling state for this trip point.
+ * THERMAL_NO_LIMIT means no upper limit,
+ * and the cooling device can be in max_state.
+ * @lower: the minimum cooling state that can be used for this trip point.
+ * THERMAL_NO_LIMIT means no lower limit,
+ * and the cooling device can be in cooling state 0.
*
+ * This interface function binds a thermal cooling device to a particular trip
+ * point of a thermal zone device.
* This function is usually called in the thermal zone device .bind callback.
+ *
+ * Return: 0 on success, the proper error value otherwise.
*/
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
int trip,
kfree(dev);
return result;
}
-EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
+EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
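For context, a minimal sketch of how a zone driver's .bind callback would now pass the new @upper and @lower arguments; the single trip point and absence of state limits are assumptions for the example, not taken from this patch.

    #include <linux/thermal.h>

    /* Illustrative sketch: bind cdev to trip 0 of tz without limiting the
     * usable cooling states. tz and cdev come from the .bind callback.
     */
    static int example_bind(struct thermal_zone_device *tz,
                            struct thermal_cooling_device *cdev)
    {
            return thermal_zone_bind_cooling_device(tz, 0, cdev,
                                                    THERMAL_NO_LIMIT,
                                                    THERMAL_NO_LIMIT);
    }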
/**
- * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone
- * @tz: thermal zone device
+ * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
+ * thermal zone.
+ * @tz: pointer to a struct thermal_zone_device.
* @trip: indicates which trip point the cooling devices is
* associated with in this thermal zone.
- * @cdev: thermal cooling device
+ * @cdev: pointer to a struct thermal_cooling_device.
*
+ * This interface function unbinds a thermal cooling device from a particular
+ * trip point of a thermal zone device.
* This function is usually called in the thermal zone device .unbind callback.
+ *
+ * Return: 0 on success, the proper error value otherwise.
*/
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
int trip,
kfree(pos);
return 0;
}
-EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
+EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);
static void thermal_release(struct device *dev)
{
};
/**
- * thermal_cooling_device_register - register a new thermal cooling device
+ * thermal_cooling_device_register() - register a new thermal cooling device
* @type: the thermal cooling device type.
* @devdata: device private data.
* @ops: standard thermal cooling devices callbacks.
+ *
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
struct thermal_cooling_device *
thermal_cooling_device_register(char *type, void *devdata,
return ERR_PTR(result);
}
- strcpy(cdev->type, type ? : "");
+ strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->ops = ops;
device_unregister(&cdev->device);
return ERR_PTR(result);
}
-EXPORT_SYMBOL(thermal_cooling_device_register);
+EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
/**
* thermal_cooling_device_unregister - removes the registered thermal cooling device
device_unregister(&cdev->device);
return;
}
-EXPORT_SYMBOL(thermal_cooling_device_unregister);
+EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
void thermal_cdev_update(struct thermal_cooling_device *cdev)
{
EXPORT_SYMBOL(thermal_cdev_update);
/**
- * notify_thermal_framework - Sensor drivers use this API to notify framework
+ * thermal_notify_framework - Sensor drivers use this API to notify framework
* @tz: thermal zone device
* @trip: indicates which trip point has been crossed
*
* The throttling policy is based on the configured platform data; if no
* platform data is provided, this uses the step_wise throttling policy.
*/
-void notify_thermal_framework(struct thermal_zone_device *tz, int trip)
+void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
{
handle_thermal_trip(tz, trip);
}
-EXPORT_SYMBOL(notify_thermal_framework);
+EXPORT_SYMBOL_GPL(thermal_notify_framework);
/**
- * create_trip_attrs - create attributes for trip points
+ * create_trip_attrs() - create attributes for trip points
* @tz: the thermal zone device
* @mask: Writeable trip point bitmap.
+ *
+ * helper function to instantiate sysfs entries for every trip
+ * point and its properties of a struct thermal_zone_device.
+ *
+ * Return: 0 on success, the proper error value otherwise.
*/
static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
{
}
/**
- * thermal_zone_device_register - register a new thermal zone device
+ * thermal_zone_device_register() - register a new thermal zone device
* @type: the thermal zone device type
* @trips: the number of trip points the thermal zone support
* @mask: a bit string indicating the writeablility of trip points
* whether trip points have been crossed (0 for interrupt
* driven systems)
*
+ * This interface function adds a new thermal zone device (sensor) to
+ * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
+ * thermal cooling devices registered at the same time.
* thermal_zone_device_unregister() must be called when the device is no
* longer needed. The passive cooling depends on the .get_trend() return value.
+ *
+ * Return: a pointer to the created struct thermal_zone_device or,
+ * in case of error, an ERR_PTR. Caller must check return value with
+ * IS_ERR*() helpers.
*/
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
return ERR_PTR(result);
}
- strcpy(tz->type, type ? : "");
+ strlcpy(tz->type, type ? : "", sizeof(tz->type));
tz->ops = ops;
tz->tzp = tzp;
tz->device.class = &thermal_class;
device_unregister(&tz->device);
return ERR_PTR(result);
}
-EXPORT_SYMBOL(thermal_zone_device_register);
+EXPORT_SYMBOL_GPL(thermal_zone_device_register);
/**
* thermal_device_unregister - removes the registered thermal zone device
device_unregister(&tz->device);
return;
}
-EXPORT_SYMBOL(thermal_zone_device_unregister);
+EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
+
+/**
+ * thermal_zone_get_zone_by_name() - search for a zone and return its ref
+ * @name: name of the thermal zone to look up
+ *
+ * When only one zone is found with the passed name, returns a reference to it.
+ *
+ * Return: On success returns a reference to the unique thermal zone whose
+ * name matches @name, an ERR_PTR otherwise (-EINVAL for invalid
+ * parameters, -ENODEV for not found and -EEXIST for multiple matches).
+ */
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name)
+{
+ struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL);
+ unsigned int found = 0;
+
+ if (!name)
+ goto exit;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_tz_list, node)
+ if (!strnicmp(name, pos->type, THERMAL_NAME_LENGTH)) {
+ found++;
+ ref = pos;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ /* nothing has been found, thus an error code for it */
+ if (found == 0)
+ ref = ERR_PTR(-ENODEV);
+ else if (found > 1)
+ /* Success only when a unique zone is found */
+ ref = ERR_PTR(-EEXIST);
+
+exit:
+ return ref;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
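A short usage sketch combining the two helpers exported above; the zone name "cpu-thermal" and the wrapper function are assumptions for illustration, not something this series defines.

    #include <linux/err.h>
    #include <linux/thermal.h>

    /* Illustrative only: look up a thermal zone by name and read its
     * temperature in millicelsius.
     */
    static int example_read_zone_temp(unsigned long *temp)
    {
            struct thermal_zone_device *tz;

            tz = thermal_zone_get_zone_by_name("cpu-thermal");
            if (IS_ERR(tz))
                    return PTR_ERR(tz);

            return thermal_zone_get_temp(tz, temp);
    }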
#ifdef CONFIG_NET
static struct genl_family thermal_event_genl_family = {
return result;
}
-EXPORT_SYMBOL(thermal_generate_netlink_event);
+EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
static int genetlink_init(void)
{
static inline void genetlink_exit(void) {}
#endif /* !CONFIG_NET */
+static int __init thermal_register_governors(void)
+{
+ int result;
+
+ result = thermal_gov_step_wise_register();
+ if (result)
+ return result;
+
+ result = thermal_gov_fair_share_register();
+ if (result)
+ return result;
+
+ return thermal_gov_user_space_register();
+}
+
+static void thermal_unregister_governors(void)
+{
+ thermal_gov_step_wise_unregister();
+ thermal_gov_fair_share_unregister();
+ thermal_gov_user_space_unregister();
+}
+
static int __init thermal_init(void)
{
- int result = 0;
+ int result;
+
+ result = thermal_register_governors();
+ if (result)
+ goto error;
result = class_register(&thermal_class);
- if (result) {
- idr_destroy(&thermal_tz_idr);
- idr_destroy(&thermal_cdev_idr);
- mutex_destroy(&thermal_idr_lock);
- mutex_destroy(&thermal_list_lock);
- return result;
- }
+ if (result)
+ goto unregister_governors;
+
result = genetlink_init();
+ if (result)
+ goto unregister_class;
+
+ return 0;
+
+unregister_governors:
+ thermal_unregister_governors();
+unregister_class:
+ class_unregister(&thermal_class);
+error:
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
+ mutex_destroy(&thermal_governor_lock);
return result;
}
static void __exit thermal_exit(void)
{
+ genetlink_exit();
class_unregister(&thermal_class);
+ thermal_unregister_governors();
idr_destroy(&thermal_tz_idr);
idr_destroy(&thermal_cdev_idr);
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
- genetlink_exit();
+ mutex_destroy(&thermal_governor_lock);
}
fs_initcall(thermal_init);
struct list_head cdev_node; /* node in cdev->thermal_instances */
};
+int thermal_register_governor(struct thermal_governor *);
+void thermal_unregister_governor(struct thermal_governor *);
+
+#ifdef CONFIG_THERMAL_GOV_STEP_WISE
+int thermal_gov_step_wise_register(void);
+void thermal_gov_step_wise_unregister(void);
+#else
+static inline int thermal_gov_step_wise_register(void) { return 0; }
+static inline void thermal_gov_step_wise_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_STEP_WISE */
+
+#ifdef CONFIG_THERMAL_GOV_FAIR_SHARE
+int thermal_gov_fair_share_register(void);
+void thermal_gov_fair_share_unregister(void);
+#else
+static inline int thermal_gov_fair_share_register(void) { return 0; }
+static inline void thermal_gov_fair_share_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */
+
+#ifdef CONFIG_THERMAL_GOV_USER_SPACE
+int thermal_gov_user_space_register(void);
+void thermal_gov_user_space_unregister(void);
+#else
+static inline int thermal_gov_user_space_register(void) { return 0; }
+static inline void thermal_gov_user_space_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+
#endif /* __THERMAL_CORE_H__ */
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
#include <linux/thermal.h>
#include "thermal_core.h"
static struct thermal_governor thermal_gov_user_space = {
.name = "user_space",
.throttle = notify_user_space,
- .owner = THIS_MODULE,
};
-static int __init thermal_gov_user_space_init(void)
+int thermal_gov_user_space_register(void)
{
return thermal_register_governor(&thermal_gov_user_space);
}
-static void __exit thermal_gov_user_space_exit(void)
+void thermal_gov_user_space_unregister(void)
{
thermal_unregister_governor(&thermal_gov_user_space);
}
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_user_space_init);
-module_exit(thermal_gov_user_space_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A user space Thermal notifier");
-MODULE_LICENSE("GPL");
hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
/*
* We don't use rsrc_start to map in our registers, but seems like
err_no_irq:
tilegx_stop_ehc();
usb_put_hcd(hcd);
+err_hcd:
gxio_usb_host_destroy(&pdata->usb_ctx);
return ret;
}
hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_hcd;
+ }
/*
* We don't use rsrc_start to map in our registers, but seems like
err_no_irq:
tilegx_stop_ohc();
usb_put_hcd(hcd);
+err_hcd:
gxio_usb_host_destroy(&pdata->usb_ctx);
return ret;
}
config USB_GPIO_VBUS
tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Provides simple GPIO VBUS sensing for controllers with an
internal transceiver via the usb_phy interface, and
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_MODE_HELPERS
- select OF_VIDEOMODE
+ select VIDEOMODE_HELPERS
help
Framebuffer support for the MXS SoC.
tristate "Solomon SSD1307 framebuffer support"
depends on FB && I2C
depends on OF
- depends on GENERIC_GPIO
+ depends on GPIOLIB
select FB_SYS_FOPS
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
config LCD_L4F00242T03
tristate "Epson L4F00242T03 LCD"
- depends on SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GPIOLIB
help
SPI driver for Epson L4F00242T03. This provides basic support
for init and powering the LCD up/down through a sysfs interface.
config LCD_LMS283GF05
tristate "Samsung LMS283GF05 LCD"
- depends on SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GPIOLIB
help
SPI driver for Samsung LMS283GF05. This provides basic support
for powering the LCD up/down through a sysfs interface.
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
-#include <video/of_display_timing.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/pinctrl/consumer.h>
#include <linux/fb.h>
#include <linux/regulator/consumer.h>
+#include <video/of_display_timing.h>
#include <video/videomode.h>
#define REG_SET 4
struct videomode vm;
struct fb_videomode fb_vm;
- ret = videomode_from_timing(timings, &vm, i);
+ ret = videomode_from_timings(timings, &vm, i);
if (ret < 0)
goto put_timings_node;
ret = fb_videomode_from_videomode(&vm, &fb_vm);
if (ret < 0)
goto put_timings_node;
- if (vm.data_flags & DISPLAY_FLAGS_DE_HIGH)
+ if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.data_flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
fb_add_videomode(&fb_vm, &fb_info->modelist);
}
config W1_MASTER_GPIO
tristate "GPIO 1-wire busmaster"
- depends on GENERIC_GPIO
+ depends on GPIOLIB
help
Say Y here if you want to communicate with your 1-wire devices using
GPIO pins. This driver uses the GPIO API to control the wire.
return -EINVAL;
}
- wdt_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!wdt_base) {
- dev_err(&pdev->dev, "unable to remap memory region\n");
- return -ENOMEM;
- }
+ wdt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdt_base))
+ return PTR_ERR(wdt_base);
wdt_clk = devm_clk_get(&pdev->dev, "wdt");
if (IS_ERR(wdt_clk))
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/err.h>
#define MODULE_NAME "DAVINCI-WDT: "
return -ENOENT;
}
- wdt_base = devm_request_and_ioremap(dev, wdt_mem);
- if (!wdt_base) {
- dev_err(dev, "ioremap failed\n");
- return -EADDRNOTAVAIL;
- }
+ wdt_base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt_base))
+ return PTR_ERR(wdt_base);
ret = misc_register(&davinci_wdt_miscdev);
if (ret < 0) {
}
/* get the memory region for the watchdog timer */
- wdt_base = devm_request_and_ioremap(dev, wdt_mem);
- if (wdt_base == NULL) {
- dev_err(dev, "failed to devm_request_and_ioremap() region\n");
- ret = -ENOMEM;
+ wdt_base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt_base)) {
+ ret = PTR_ERR(wdt_base);
goto err;
}
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/watchdog.h>
#define DRV_NAME "sh-wdt"
wdt->clk = NULL;
}
- wdt->base = devm_request_and_ioremap(wdt->dev, res);
- if (unlikely(!wdt->base)) {
- rc = -EADDRNOTAVAIL;
+ wdt->base = devm_ioremap_resource(wdt->dev, res);
+ if (IS_ERR(wdt->base)) {
+ rc = PTR_ERR(wdt->base);
goto err;
}
int err, devno;
if (watchdog->id == 0) {
+ old_wdd = watchdog;
watchdog_miscdev.parent = watchdog->parent;
err = misc_register(&watchdog_miscdev);
if (err != 0) {
if (err == -EBUSY)
pr_err("%s: a legacy watchdog module is probably present.\n",
watchdog->info->identity);
+ old_wdd = NULL;
return err;
}
- old_wdd = watchdog;
}
/* Fill in the data structures */
config BTRFS_FS
- tristate "Btrfs filesystem Unstable disk format"
+ tristate "Btrfs filesystem support"
select LIBCRC32C
select ZLIB_INFLATE
select ZLIB_DEFLATE
In most cases, unless you are a btrfs developer who needs
to verify the integrity of (super)-block write requests
during the run of a regression test, say N
+
+config BTRFS_FS_RUN_SANITY_TESTS
+ bool "Btrfs will run sanity tests upon loading"
+ depends on BTRFS_FS
+ help
+ This will run some basic sanity tests on the free space cache
+ code to make sure it is acting as it should. These are mostly
+ regression tests and are only really interesting to btrfs developers.
+
+ If unsure, say N.
+
+config BTRFS_DEBUG
+ bool "Btrfs debugging support"
+ depends on BTRFS_FS
+ help
+ Enable run-time debugging support for the btrfs filesystem. This may
+ enable additional and expensive checks with negative impact on
+ performance, or export extra information via sysfs.
+
+ If unsure, say N.
err = __resolve_indirect_ref(fs_info, search_commit_root,
time_seq, ref, parents,
extent_item_pos);
+ if (err == -ENOMEM)
+ goto out;
if (err)
continue;
new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
if (!new_ref) {
ret = -ENOMEM;
- break;
+ goto out;
}
memcpy(new_ref, ref, sizeof(*ref));
new_ref->parent = node->val;
}
ulist_reinit(parents);
}
-
+out:
ulist_free(parents);
return ret;
}
BUG_ON(!ref->wanted_disk_byte);
eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
fs_info->tree_root->leafsize, 0);
- BUG_ON(!eb);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ return -EIO;
+ }
btrfs_tree_read_lock(eb);
if (btrfs_header_level(eb) == 0)
btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
* having a parent).
* mode = 2: merge identical parents
*/
-static int __merge_refs(struct list_head *head, int mode)
+static void __merge_refs(struct list_head *head, int mode)
{
struct list_head *pos1;
}
}
- return 0;
}
/*
default:
WARN_ON(1);
}
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
return 0;
default:
WARN_ON(1);
}
- BUG_ON(ret);
+ if (ret)
+ return ret;
ptr += btrfs_extent_inline_ref_size(type);
}
default:
WARN_ON(1);
}
- BUG_ON(ret);
+ if (ret)
+ return ret;
+
}
return ret;
if (ret)
goto out;
- ret = __merge_refs(&prefs, 1);
- if (ret)
- goto out;
+ __merge_refs(&prefs, 1);
ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
&prefs, extent_item_pos);
if (ret)
goto out;
- ret = __merge_refs(&prefs, 2);
- if (ret)
- goto out;
+ __merge_refs(&prefs, 2);
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
if (ref->count && ref->root_id && ref->parent == 0) {
/* no parent == root of tree */
ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ goto out;
}
if (ref->count && ref->parent) {
struct extent_inode_elem *eie = NULL;
info_level);
eb = read_tree_block(fs_info->extent_root,
ref->parent, bsz, 0);
- BUG_ON(!eb);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ return -EIO;
+ }
ret = find_extent_in_eb(eb, bytenr,
*extent_item_pos, &eie);
ref->inode_list = eie;
ret = ulist_add_merge(refs, ref->parent,
(uintptr_t)ref->inode_list,
(u64 *)&eie, GFP_NOFS);
+ if (ret < 0)
+ goto out;
if (!ret && extent_item_pos) {
/*
* we've recorded that parent, so we must extend
eie = eie->next;
eie->next = ref->inode_list;
}
- BUG_ON(ret < 0);
}
kfree(ref);
}
return ret;
}
+/*
+ * this iterates to turn a name (from iref/extref) into a full filesystem path.
+ * Elements of the path are separated by '/' and the path is guaranteed to be
+ * 0-terminated. the path is only given within the current file system.
+ * Therefore, it never starts with a '/'. the caller is responsible to provide
+ * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
+ * the start point of the resulting string is returned. this pointer is within
+ * dest, normally.
+ * in case the path buffer would overflow, the pointer is decremented further
+ * as if output was written to the buffer, though no more output is actually
+ * generated. that way, the caller can determine how much space would be
+ * required for the path to fit into the buffer. in that case, the returned
+ * value will be smaller than dest. callers must check this!
+ */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
u32 name_len, unsigned long name_off,
struct extent_buffer *eb_in, u64 parent,
return dest + bytes_left;
}
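A hedged sketch of the caller-side convention described in the comment above: the returned pointer normally lies inside dest, and a pointer below dest means the path did not fit. The wrapper name, its arguments, and the assumption that read failures surface as an ERR_PTR are illustrative only and rely on the surrounding btrfs headers.

    /* Illustrative only: apply the overflow rule of btrfs_ref_to_path(). */
    static int example_name_to_path(struct btrfs_root *fs_root,
                                    struct btrfs_path *path,
                                    u32 name_len, unsigned long name_off,
                                    struct extent_buffer *eb_in, u64 parent,
                                    char *buf, u32 size)
    {
            char *start = btrfs_ref_to_path(fs_root, path, name_len,
                                            name_off, eb_in, parent,
                                            buf, size);

            if (IS_ERR(start))      /* assumed failure mode */
                    return PTR_ERR(start);
            if (start < buf)        /* buffer too small, caller must retry */
                    return -ENAMETOOLONG;

            /* start points at the 0-terminated path inside buf */
            return 0;
    }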
-/*
- * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
- * of the path are separated by '/' and the path is guaranteed to be
- * 0-terminated. the path is only given within the current file system.
- * Therefore, it never starts with a '/'. the caller is responsible to provide
- * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
- * the start point of the resulting string is returned. this pointer is within
- * dest, normally.
- * in case the path buffer would overflow, the pointer is decremented further
- * as if output was written to the buffer, though no more output is actually
- * generated. that way, the caller can determine how much space would be
- * required for the path to fit into the buffer. in that case, the returned
- * value will be smaller than dest. callers must check this!
- */
-char *btrfs_iref_to_path(struct btrfs_root *fs_root,
- struct btrfs_path *path,
- struct btrfs_inode_ref *iref,
- struct extent_buffer *eb_in, u64 parent,
- char *dest, u32 size)
-{
- return btrfs_ref_to_path(fs_root, path,
- btrfs_inode_ref_name_len(eb_in, iref),
- (unsigned long)(iref + 1),
- eb_in, parent, dest, size);
-}
-
/*
* this makes the path point to (logical EXTENT_ITEM *)
* returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
iterate_extent_inodes_t *iterate, void *ctx)
{
int ret;
- struct list_head data_refs = LIST_HEAD_INIT(data_refs);
- struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
struct btrfs_trans_handle *trans;
struct ulist *refs = NULL;
struct ulist *roots = NULL;
iterate, ctx);
}
ulist_free(roots);
- roots = NULL;
}
free_leaf_list(refs);
- ulist_free(roots);
out:
if (!search_commit_root) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist **roots);
-char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
- struct btrfs_inode_ref *iref, struct extent_buffer *eb,
- u64 parent, char *dest, u32 size);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
u32 name_len, unsigned long name_off,
struct extent_buffer *eb_in, u64 parent,
unsigned long runtime_flags;
- /* Keep track of who's O_SYNC/fsycing currently */
+ /* Keep track of who's O_SYNC/fsyncing currently */
atomic_t sync_writers;
/* full 64 bit generation number, struct vfs_inode doesn't have a big
u32 sums;
};
+static int btrfs_decompress_biovec(int type, struct page **pages_in,
+ u64 disk_start, struct bio_vec *bvec,
+ int vcnt, size_t srclen);
+
static inline int compressed_bio_size(struct btrfs_root *root,
unsigned long disk_size)
{
u64 disk_start)
{
int ret;
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct page *page;
unsigned long i;
char *kaddr;
csum = ~(u32)0;
kaddr = kmap_atomic(page);
- csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
+ csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
btrfs_csum_final(csum, (char *)&csum);
kunmap_atomic(kaddr);
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
-struct btrfs_compress_op *btrfs_compress_op[] = {
+static struct btrfs_compress_op *btrfs_compress_op[] = {
&btrfs_zlib_compress,
&btrfs_lzo_compress,
};
* be contiguous. They all correspond to the range of bytes covered by
* the compressed extent.
*/
-int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
- struct bio_vec *bvec, int vcnt, size_t srclen)
+static int btrfs_decompress_biovec(int type, struct page **pages_in,
+ u64 disk_start, struct bio_vec *bvec,
+ int vcnt, size_t srclen)
{
struct list_head *workspace;
int ret;
unsigned long *total_in,
unsigned long *total_out,
unsigned long max_out);
-int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
- struct bio_vec *bvec, int vcnt, size_t srclen);
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
struct btrfs_root *root,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
-static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_path *path, int level, int slot);
+static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
+ int level, int slot);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
-struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
- u32 blocksize, u64 parent_transid,
- u64 time_seq);
-struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
- u64 bytenr, u32 blocksize,
- u64 time_seq);
+static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
struct btrfs_path *btrfs_alloc_path(void)
{
* tree until you end up with a lock on the root. A locked buffer
* is returned, with a reference held.
*/
-struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
struct extent_buffer *eb;
write_unlock(&fs_info->tree_mod_log_lock);
}
+/*
+ * Increment the upper half of tree_mod_seq, set lower half zero.
+ *
+ * Must be called with fs_info->tree_mod_seq_lock held.
+ */
+static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
+{
+ u64 seq = atomic64_read(&fs_info->tree_mod_seq);
+ seq &= 0xffffffff00000000ull;
+ seq += 1ull << 32;
+ atomic64_set(&fs_info->tree_mod_seq, seq);
+ return seq;
+}
+
+/*
+ * Increment the lower half of tree_mod_seq.
+ *
+ * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
+ * are generated should not technically require a spin lock here. (Rationale:
+ * incrementing the minor while an increment of the major seq number is between
+ * its atomic64_read and atomic64_set calls doesn't duplicate sequence numbers;
+ * it just returns a unique sequence number as usual.) We have decided to leave
+ * that requirement in here and rethink it once we notice it really imposes a
+ * problem on some workload.
+ */
+static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
+{
+ return atomic64_inc_return(&fs_info->tree_mod_seq);
+}
+
+/*
+ * return the last minor in the previous major tree_mod_seq number
+ */
+u64 btrfs_tree_mod_seq_prev(u64 seq)
+{
+ return (seq & 0xffffffff00000000ull) - 1ull;
+}
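To make the 32/32 split concrete (the values below are made up for illustration), the major half advances in steps of 1 << 32 while the minor half counts within it, and the "previous" sequence is the last minor of the preceding major:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long seq = 0x0000000300000005ULL; /* major 3, minor 5 */

            /* next value produced by a major increment: minor reset to 0 */
            unsigned long long next_major =
                    (seq & 0xffffffff00000000ULL) + (1ULL << 32);

            /* btrfs_tree_mod_seq_prev(): last minor of the previous major */
            unsigned long long prev = (seq & 0xffffffff00000000ULL) - 1ULL;

            printf("next major start: %#llx\n", next_major); /* 0x400000000 */
            printf("previous major end: %#llx\n", prev);     /* 0x2ffffffff */
            return 0;
    }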
+
/*
* This adds a new blocker to the tree mod log's blocker list if the @elem
* passed does not already have a sequence number set. So when a caller expects
tree_mod_log_write_lock(fs_info);
spin_lock(&fs_info->tree_mod_seq_lock);
if (!elem->seq) {
- elem->seq = btrfs_inc_tree_mod_seq(fs_info);
+ elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}
- seq = btrfs_inc_tree_mod_seq(fs_info);
+ seq = btrfs_inc_tree_mod_seq_minor(fs_info);
spin_unlock(&fs_info->tree_mod_seq_lock);
tree_mod_log_write_unlock(fs_info);
if (!tm)
return -ENOMEM;
- tm->seq = btrfs_inc_tree_mod_seq(fs_info);
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
return tm->seq;
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *old_root,
- struct extent_buffer *new_root, gfp_t flags)
+ struct extent_buffer *new_root, gfp_t flags,
+ int log_removal)
{
struct tree_mod_elem *tm;
int ret;
if (tree_mod_dont_log(fs_info, NULL))
return 0;
- __tree_mod_log_free_eb(fs_info, old_root);
+ if (log_removal)
+ __tree_mod_log_free_eb(fs_info, old_root);
ret = tree_mod_alloc(fs_info, flags, &tm);
if (ret < 0)
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
struct extent_buffer *src, unsigned long dst_offset,
- unsigned long src_offset, int nr_items, int log_removal)
+ unsigned long src_offset, int nr_items)
{
int ret;
int i;
}
for (i = 0; i < nr_items; i++) {
- if (log_removal) {
- ret = tree_mod_log_insert_key_locked(fs_info, src,
- i + src_offset,
- MOD_LOG_KEY_REMOVE);
- BUG_ON(ret < 0);
- }
+ ret = tree_mod_log_insert_key_locked(fs_info, src,
+ i + src_offset,
+ MOD_LOG_KEY_REMOVE);
+ BUG_ON(ret < 0);
ret = tree_mod_log_insert_key_locked(fs_info, dst,
i + dst_offset,
MOD_LOG_KEY_ADD);
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
- struct extent_buffer *new_root_node)
+ struct extent_buffer *new_root_node,
+ int log_removal)
{
int ret;
ret = tree_mod_log_insert_root(root->fs_info, root->node,
- new_root_node, GFP_NOFS);
+ new_root_node, GFP_NOFS, log_removal);
BUG_ON(ret < 0);
}
if (btrfs_block_can_be_shared(root, buf)) {
ret = btrfs_lookup_extent_info(trans, root, buf->start,
- buf->len, &refs, &flags);
+ btrfs_header_level(buf), 1,
+ &refs, &flags);
if (ret)
return ret;
if (refs == 0) {
parent_start = 0;
extent_buffer_get(cow);
- tree_mod_log_set_root_pointer(root, cow);
+ tree_mod_log_set_root_pointer(root, cow, 1);
rcu_assign_pointer(root->node, cow);
btrfs_free_tree_block(trans, root, buf, parent_start,
*/
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
- struct btrfs_root *root, u64 time_seq)
+ struct extent_buffer *eb_root, u64 time_seq)
{
struct tree_mod_elem *tm;
struct tree_mod_elem *found = NULL;
- u64 root_logical = root->node->start;
+ u64 root_logical = eb_root->start;
int looped = 0;
if (!time_seq)
found = tm;
root_logical = tm->old_root.logical;
- BUG_ON(root_logical == root->node->start);
looped = 1;
}
btrfs_set_header_nritems(eb, n);
}
+/*
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
+ * is returned. If rewind operations happen, a fresh buffer is returned. The
+ * returned buffer is always read-locked. If the returned buffer is not the
+ * input buffer, the lock on the input buffer is released and the input buffer
+ * is freed (its refcount is decremented).
+ */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
u64 time_seq)
}
extent_buffer_get(eb_rewin);
+ btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
+ extent_buffer_get(eb_rewin);
+ btrfs_tree_read_lock(eb_rewin);
__tree_mod_log_rewind(eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
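Given the contract described in the comment above, callers no longer juggle
the locks themselves. A hedged sketch of the call site (this mirrors the
btrfs_search_old_slot hunk further below rather than introducing anything
new):

	/* b is read-locked on entry; whatever comes back is read-locked too,
	 * and if the buffer was replaced, the old one has already been
	 * unlocked and released inside tree_mod_log_rewind(). */
	b = tree_mod_log_rewind(root->fs_info, b, time_seq);
	p->locks[level] = BTRFS_READ_LOCK;
	p->nodes[level] = b;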
get_old_root(struct btrfs_root *root, u64 time_seq)
{
struct tree_mod_elem *tm;
- struct extent_buffer *eb;
+ struct extent_buffer *eb = NULL;
+ struct extent_buffer *eb_root;
struct extent_buffer *old;
struct tree_mod_root *old_root = NULL;
u64 old_generation = 0;
u64 logical;
u32 blocksize;
- eb = btrfs_read_lock_root_node(root);
- tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+ eb_root = btrfs_read_lock_root_node(root);
+ tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
if (!tm)
- return root->node;
+ return eb_root;
if (tm->op == MOD_LOG_ROOT_REPLACE) {
old_root = &tm->old_root;
old_generation = tm->generation;
logical = old_root->logical;
} else {
- logical = root->node->start;
+ logical = eb_root->start;
}
tm = tree_mod_log_search(root->fs_info, logical, time_seq);
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
- btrfs_tree_read_unlock(root->node);
- free_extent_buffer(root->node);
+ btrfs_tree_read_unlock(eb_root);
+ free_extent_buffer(eb_root);
blocksize = btrfs_level_size(root, old_root->level);
old = read_tree_block(root, logical, blocksize, 0);
- if (!old) {
+ if (!old || !extent_buffer_uptodate(old)) {
+ free_extent_buffer(old);
pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
logical);
WARN_ON(1);
free_extent_buffer(old);
}
} else if (old_root) {
- btrfs_tree_read_unlock(root->node);
- free_extent_buffer(root->node);
+ btrfs_tree_read_unlock(eb_root);
+ free_extent_buffer(eb_root);
eb = alloc_dummy_extent_buffer(logical, root->nodesize);
} else {
- eb = btrfs_clone_extent_buffer(root->node);
- btrfs_tree_read_unlock(root->node);
- free_extent_buffer(root->node);
+ eb = btrfs_clone_extent_buffer(eb_root);
+ btrfs_tree_read_unlock(eb_root);
+ free_extent_buffer(eb_root);
}
if (!eb)
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(eb, root->root_key.objectid);
+ btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
{
struct tree_mod_elem *tm;
int level;
+ struct extent_buffer *eb_root = btrfs_root_node(root);
- tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+ tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
level = tm->old_root.level;
} else {
- rcu_read_lock();
- level = btrfs_header_level(root->node);
- rcu_read_unlock();
+ level = btrfs_header_level(eb_root);
}
+ free_extent_buffer(eb_root);
return level;
}
if (!cur) {
cur = read_tree_block(root, blocknr,
blocksize, gen);
- if (!cur)
+ if (!cur || !extent_buffer_uptodate(cur)) {
+ free_extent_buffer(cur);
return -EIO;
+ }
} else if (!uptodate) {
err = btrfs_read_buffer(cur, gen);
if (err) {
struct extent_buffer *parent, int slot)
{
int level = btrfs_header_level(parent);
+ struct extent_buffer *eb;
+
if (slot < 0)
return NULL;
if (slot >= btrfs_header_nritems(parent))
BUG_ON(level == 0);
- return read_tree_block(root, btrfs_node_blockptr(parent, slot),
- btrfs_level_size(root, level - 1),
- btrfs_node_ptr_generation(parent, slot));
+ eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
+ btrfs_level_size(root, level - 1),
+ btrfs_node_ptr_generation(parent, slot));
+ if (eb && !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ eb = NULL;
+ }
+
+ return eb;
}
/*
goto enospc;
}
- tree_mod_log_set_root_pointer(root, child);
+ tree_mod_log_set_root_pointer(root, child, 1);
rcu_assign_pointer(root->node, child);
add_root_to_dirty_list(root);
if (btrfs_header_nritems(right) == 0) {
clean_tree_block(trans, root, right);
btrfs_tree_unlock(right);
- del_ptr(trans, root, path, level + 1, pslot + 1);
+ del_ptr(root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
btrfs_free_tree_block(trans, root, right, 0, 1);
free_extent_buffer_stale(right);
if (btrfs_header_nritems(mid) == 0) {
clean_tree_block(trans, root, mid);
btrfs_tree_unlock(mid);
- del_ptr(trans, root, path, level + 1, pslot);
+ del_ptr(root, path, level + 1, pslot);
root_sub_used(root, mid->len);
btrfs_free_tree_block(trans, root, mid, 0, 1);
free_extent_buffer_stale(mid);
int no_skips = 0;
struct extent_buffer *t;
- if (path->really_keep_locks)
- return;
-
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
if (!path->nodes[i])
break;
{
int i;
- if (path->keep_locks || path->really_keep_locks)
+ if (path->keep_locks)
return;
for (i = level; i < BTRFS_MAX_LEVEL; i++) {
if (!cow)
write_lock_level = -1;
- if (cow && (p->really_keep_locks || p->keep_locks || p->lowest_level))
+ if (cow && (p->keep_locks || p->lowest_level))
write_lock_level = BTRFS_MAX_LEVEL;
min_write_lock_level = write_lock_level;
btrfs_clear_path_blocking(p, b,
BTRFS_READ_LOCK);
}
+ b = tree_mod_log_rewind(root->fs_info, b, time_seq);
p->locks[level] = BTRFS_READ_LOCK;
p->nodes[level] = b;
- b = tree_mod_log_rewind(root->fs_info, b, time_seq);
- if (b != p->nodes[level]) {
- btrfs_tree_unlock_rw(p->nodes[level],
- p->locks[level]);
- p->locks[level] = 0;
- p->nodes[level] = b;
- }
} else {
p->slots[level] = slot;
unlock_up(p, level, lowest_unlock, 0, NULL);
* higher levels
*
*/
-static void fixup_low_keys(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key *key, int level)
{
int i;
* This function isn't completely safe. It's the caller's responsibility
* that the new key won't break the order
*/
-void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *new_key)
{
struct btrfs_disk_key disk_key;
btrfs_set_item_key(eb, &disk_key, slot);
btrfs_mark_buffer_dirty(eb);
if (slot == 0)
- fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(root, path, &disk_key, 1);
}
/*
push_items = min(src_nritems - 8, push_items);
tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
- push_items, 1);
+ push_items);
copy_extent_buffer(dst, src,
btrfs_node_key_ptr_offset(dst_nritems),
btrfs_node_key_ptr_offset(0),
sizeof(struct btrfs_key_ptr));
tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
- src_nritems - push_items, push_items, 1);
+ src_nritems - push_items, push_items);
copy_extent_buffer(dst, src,
btrfs_node_key_ptr_offset(0),
btrfs_node_key_ptr_offset(src_nritems - push_items),
*/
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct btrfs_path *path, int level)
+ struct btrfs_path *path, int level, int log_removal)
{
u64 lower_gen;
struct extent_buffer *lower;
btrfs_mark_buffer_dirty(c);
old = root->node;
- tree_mod_log_set_root_pointer(root, c);
+ tree_mod_log_set_root_pointer(root, c, log_removal);
rcu_assign_pointer(root->node, c);
/* the super has an extra ref to root->node */
int mid;
int ret;
u32 c_nritems;
- int tree_mod_log_removal = 1;
c = path->nodes[level];
WARN_ON(btrfs_header_generation(c) != trans->transid);
if (c == root->node) {
- /* trying to split the root, lets make a new one */
- ret = insert_new_root(trans, root, path, level + 1);
/*
- * removal of root nodes has been logged by
- * tree_mod_log_set_root_pointer due to locking
+ * trying to split the root, let's make a new one
+ *
+ * tree mod log: We pass 0 as log_removal parameter to
+ * insert_new_root, because that root buffer will be kept as a
+ * normal node. We are going to log removal of half of the
+ * elements below with tree_mod_log_eb_copy. We're holding a
+ * tree lock on the buffer, which is why we cannot race with
+ * other tree_mod_log users.
*/
- tree_mod_log_removal = 0;
+ ret = insert_new_root(trans, root, path, level + 1, 0);
if (ret)
return ret;
} else {
(unsigned long)btrfs_header_chunk_tree_uuid(split),
BTRFS_UUID_SIZE);
- tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
- tree_mod_log_removal);
+ tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
copy_extent_buffer(split, c,
btrfs_node_key_ptr_offset(0),
btrfs_node_key_ptr_offset(mid),
clean_tree_block(trans, root, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(root, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
}
if (!path->nodes[1]) {
- ret = insert_new_root(trans, root, path, 1);
+ ret = insert_new_root(trans, root, path, 1, 1);
if (ret)
return ret;
}
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
- fixup_low_keys(trans, root, path,
- &disk_key, 1);
+ fixup_low_keys(root, path, &disk_key, 1);
}
btrfs_mark_buffer_dirty(right);
return ret;
return ret;
path->slots[0]++;
- setup_items_for_insert(trans, root, path, new_key, &item_size,
+ setup_items_for_insert(root, path, new_key, &item_size,
item_size, item_size +
sizeof(struct btrfs_item), 1);
leaf = path->nodes[0];
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
-void btrfs_truncate_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
+void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end)
{
int slot;
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(root, path, &disk_key, 1);
}
item = btrfs_item_nr(leaf, slot);
/*
* make the item pointed to by the path bigger, data_size is the new size.
*/
-void btrfs_extend_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
u32 data_size)
{
int slot;
* to save stack depth by doing the bulk of the work in a function
* that doesn't call btrfs_search_slot
*/
-void setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
u32 total_data, u32 total_size, int nr)
{
if (slot == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(root, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
btrfs_mark_buffer_dirty(leaf);
slot = path->slots[0];
BUG_ON(slot < 0);
- setup_items_for_insert(trans, root, path, cpu_key, data_size,
+ setup_items_for_insert(root, path, cpu_key, data_size,
total_data, total_size, nr);
return 0;
}
* the tree should have been previously balanced so the deletion does not
* empty a node.
*/
-static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct btrfs_path *path, int level, int slot)
+static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
+ int level, int slot)
{
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(trans, root, path, &disk_key, level + 1);
+ fixup_low_keys(root, path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
}
struct extent_buffer *leaf)
{
WARN_ON(btrfs_header_generation(leaf) != trans->transid);
- del_ptr(trans, root, path, 1, path->slots[1]);
+ del_ptr(root, path, 1, path->slots[1]);
/*
* btrfs_free_extent is expensive, we want to make sure we
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(trans, root, path, &disk_key, 1);
+ fixup_low_keys(root, path, &disk_key, 1);
}
/* delete the leaf if it is mostly empty */
return btrfs_next_old_leaf(root, path, 0);
}
-/* Release the path up to but not including the given level */
-static void btrfs_release_level(struct btrfs_path *path, int level)
-{
- int i;
-
- for (i = 0; i < level; i++) {
- path->slots[i] = 0;
- if (!path->nodes[i])
- continue;
- if (path->locks[i]) {
- btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
- path->locks[i] = 0;
- }
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- }
-}
-
-/*
- * This function assumes 2 things
- *
- * 1) You are using path->keep_locks
- * 2) You are not inserting items.
- *
- * If either of these are not true do not use this function. If you need a next
- * leaf with either of these not being true then this function can be easily
- * adapted to do that, but at the moment these are the limitations.
- */
-int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- int del)
-{
- struct extent_buffer *b;
- struct btrfs_key key;
- u32 nritems;
- int level = 1;
- int slot;
- int ret = 1;
- int write_lock_level = BTRFS_MAX_LEVEL;
- int ins_len = del ? -1 : 0;
-
- WARN_ON(!(path->keep_locks || path->really_keep_locks));
-
- nritems = btrfs_header_nritems(path->nodes[0]);
- btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
-
- while (path->nodes[level]) {
- nritems = btrfs_header_nritems(path->nodes[level]);
- if (!(path->locks[level] & BTRFS_WRITE_LOCK)) {
-search:
- btrfs_release_path(path);
- ret = btrfs_search_slot(trans, root, &key, path,
- ins_len, 1);
- if (ret < 0)
- goto out;
- level = 1;
- continue;
- }
-
- if (path->slots[level] >= nritems - 1) {
- level++;
- continue;
- }
-
- btrfs_release_level(path, level);
- break;
- }
-
- if (!path->nodes[level]) {
- ret = 1;
- goto out;
- }
-
- path->slots[level]++;
- b = path->nodes[level];
-
- while (b) {
- level = btrfs_header_level(b);
-
- if (!should_cow_block(trans, root, b))
- goto cow_done;
-
- btrfs_set_path_blocking(path);
- ret = btrfs_cow_block(trans, root, b,
- path->nodes[level + 1],
- path->slots[level + 1], &b);
- if (ret)
- goto out;
-cow_done:
- path->nodes[level] = b;
- btrfs_clear_path_blocking(path, NULL, 0);
- if (level != 0) {
- ret = setup_nodes_for_search(trans, root, path, b,
- level, ins_len,
- &write_lock_level);
- if (ret == -EAGAIN)
- goto search;
- if (ret)
- goto out;
-
- b = path->nodes[level];
- slot = path->slots[level];
-
- ret = read_block_for_search(trans, root, path,
- &b, level, slot, &key, 0);
- if (ret == -EAGAIN)
- goto search;
- if (ret)
- goto out;
- level = btrfs_header_level(b);
- if (!btrfs_try_tree_write_lock(b)) {
- btrfs_set_path_blocking(path);
- btrfs_tree_lock(b);
- btrfs_clear_path_blocking(path, b,
- BTRFS_WRITE_LOCK);
- }
- path->locks[level] = BTRFS_WRITE_LOCK;
- path->nodes[level] = b;
- path->slots[level] = 0;
- } else {
- path->slots[level] = 0;
- ret = 0;
- break;
- }
- }
-
-out:
- if (ret)
- btrfs_release_path(path);
-
- return ret;
-}
-
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
u64 time_seq)
{
*/
#define BTRFS_FS_STATE_ERROR 0
#define BTRFS_FS_STATE_REMOUNTING 1
+#define BTRFS_FS_STATE_TRANS_ABORTED 2
/* Super block flags */
/* Errors detected */
#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6)
#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
+#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
BTRFS_FEATURE_INCOMPAT_RAID56 | \
- BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
+ BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
+ BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
/*
* A leaf is full of items. offset and size tell us where to find
unsigned int skip_locking:1;
unsigned int leave_spinning:1;
unsigned int search_commit_root:1;
- unsigned int really_keep_locks:1;
};
/*
*/
#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0)
/*
- * SCANNING is set during the initialization phase
+ * RESCAN is set during the initialization phase
*/
-#define BTRFS_QGROUP_STATUS_FLAG_SCANNING (1ULL << 1)
+#define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1)
/*
* Some qgroup entries are known to be out of date,
* either because the configuration has changed in a way that
* only used during scanning to record the progress
* of the scan. It contains a logical address
*/
- __le64 scan;
+ __le64 rescan;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {
wait_queue_head_t transaction_blocked_wait;
wait_queue_head_t async_submit_wait;
+ /*
+ * Used to protect the incompat_flags, compat_flags and compat_ro_flags
+ * when they are updated.
+ *
+ * Because the flags are never cleared, we don't need the lock on the
+ * read side.
+ *
+ * We also don't need the lock when we mount the fs, because there is
+ * no other task that could be updating the flags at that point.
+ */
+ spinlock_t super_lock;
struct btrfs_super_block *super_copy;
struct btrfs_super_block *super_for_commit;
struct block_device *__bdev;
/* this protects tree_mod_seq_list */
spinlock_t tree_mod_seq_lock;
- atomic_t tree_mod_seq;
+ atomic64_t tree_mod_seq;
struct list_head tree_mod_seq_list;
struct seq_list tree_mod_seq_elem;
struct rb_root qgroup_tree;
spinlock_t qgroup_lock;
+ /* protect user change for quota operations */
+ struct mutex qgroup_ioctl_lock;
+
/* list of dirty qgroups to be written at next commit */
struct list_head dirty_qgroups;
/* used by btrfs_qgroup_record_ref for an efficient tree traversal */
u64 qgroup_seq;
+ /* qgroup rescan items */
+ struct mutex qgroup_rescan_lock; /* protects the progress item */
+ struct btrfs_key qgroup_rescan_progress;
+ struct btrfs_workers qgroup_rescan_workers;
+
/* filesystem state */
unsigned long fs_state;
*/
#define BTRFS_EXTENT_ITEM_KEY 168
+/*
+ * The same as BTRFS_EXTENT_ITEM_KEY, except it is used for metadata, where we
+ * already know the length, so we store the level in key->offset instead of
+ * the length.
+ */
+#define BTRFS_METADATA_ITEM_KEY 169
+
#define BTRFS_TREE_BLOCK_REF_KEY 176
#define BTRFS_EXTENT_DATA_REF_KEY 178
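To illustrate the new item type (the bytenr, nodesize and level values below
are made up, not from the patch): a metadata extent keyed the classic way
versus the skinny way.

	struct btrfs_key key;

	/* classic extent item for a metadata block: offset carries the length */
	key.objectid = bytenr;				/* start of the tree block */
	key.type = BTRFS_EXTENT_ITEM_KEY;		/* 168 */
	key.offset = nodesize;				/* length of the block */

	/* skinny metadata item: offset carries the level instead */
	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;		/* 169 */
	key.offset = level;				/* level of the block */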
static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
{
- int t = btrfs_super_csum_type(s);
- BUG_ON(t >= ARRAY_SIZE(btrfs_csum_sizes));
+ u16 t = btrfs_super_csum_type(s);
+ /*
+ * csum type is validated at mount time
+ */
return btrfs_csum_sizes[t];
}
version, 64);
BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
flags, 64);
-BTRFS_SETGET_FUNCS(qgroup_status_scan, struct btrfs_qgroup_status_item,
- scan, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
+ rescan, 64);
/* btrfs_qgroup_info_item */
BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
- u64 num_bytes, u64 *refs, u64 *flags);
+ u64 offset, int metadata, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num, int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
struct btrfs_fs_info *info,
u64 bytenr);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
-u64 btrfs_find_block_group(struct btrfs_root *root,
- u64 search_start, u64 search_hint, int owner);
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u32 blocksize,
u64 parent, u64 root_objectid,
struct btrfs_root *root,
struct extent_buffer *buf,
u64 parent, int last_ref);
-struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 bytenr, u32 blocksize,
- int level);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 root_objectid, u64 owner,
struct btrfs_root *root,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
- struct btrfs_key *ins, u64 data);
+ struct btrfs_key *ins, int is_data);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref, int for_cow);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_root *root, u64 group_start);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
-void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf);
-void btrfs_extend_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
u32 data_size);
-void btrfs_truncate_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
+void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
-void setup_items_for_insert(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
+void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
}
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
-int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_path *path,
- int del);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
u64 time_seq);
static inline int btrfs_next_old_item(struct btrfs_root *root,
{
return btrfs_next_old_item(root, p, 0);
}
-int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
struct seq_list *elem);
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
struct seq_list *elem);
-static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
-{
- return atomic_inc_return(&fs_info->tree_mod_seq);
-}
+u64 btrfs_tree_mod_seq_prev(u64 seq);
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_root_item *item);
-void btrfs_read_root_item(struct btrfs_root *root,
- struct extent_buffer *eb, int slot,
- struct btrfs_root_item *item);
+void btrfs_read_root_item(struct extent_buffer *eb, int slot,
+ struct btrfs_root_item *item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
btrfs_search_dir_index_item(struct btrfs_root *root,
struct btrfs_path *path, u64 dirid,
const char *name, int name_len);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len);
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid,
u64 bytenr, int mod);
-u64 btrfs_file_extent_length(struct btrfs_path *path);
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
-struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- u64 bytenr, int cow);
int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
u64 isize);
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
-int btrfs_writepages(struct address_space *mapping,
- struct writeback_control *wbc);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root, u64 new_dirid);
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-int btrfs_dirty_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
-int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
-void btrfs_drop_pages(struct page **pages, size_t num_pages);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
struct page **pages, size_t num_pages,
loff_t pos, size_t write_bytes,
#ifdef CONFIG_PRINTK
__printf(2, 3)
-void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
+void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
static inline __printf(2, 3)
-void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#endif
+#define btrfs_emerg(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
+#define btrfs_alert(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
+#define btrfs_crit(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
+#define btrfs_err(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_ERR fmt, ##args)
+#define btrfs_warn(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
+#define btrfs_notice(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
+#define btrfs_info(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_INFO fmt, ##args)
+#define btrfs_debug(fs_info, fmt, args...) \
+ btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
+
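A hypothetical usage sketch of the new level-specific wrappers (the message
text is made up); each expands to btrfs_printk() with the matching KERN_*
level prefixed to the format string:

	btrfs_err(fs_info, "failed to read chunk root");
	/* same as: btrfs_printk(fs_info, KERN_ERR "failed to read chunk root"); */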
__printf(5, 6)
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
disk_super = fs_info->super_copy;
features = btrfs_super_incompat_flags(disk_super);
if (!(features & flag)) {
- features |= flag;
- btrfs_set_super_incompat_flags(disk_super, features);
+ spin_lock(&fs_info->super_lock);
+ features = btrfs_super_incompat_flags(disk_super);
+ if (!(features & flag)) {
+ features |= flag;
+ btrfs_set_super_incompat_flags(disk_super, features);
+ printk(KERN_INFO "btrfs: setting %llu feature flag\n",
+ flag);
+ }
+ spin_unlock(&fs_info->super_lock);
}
}
+#define btrfs_fs_incompat(fs_info, opt) \
+ __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
+{
+ struct btrfs_super_block *disk_super;
+ disk_super = fs_info->super_copy;
+ return !!(btrfs_super_incompat_flags(disk_super) & flag);
+}
+
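A hedged usage sketch of the flag helpers, assuming the usual
btrfs_set_fs_incompat() wrapper that pairs with the setter body shown a few
hunks earlier; use_skinny_metadata() is a hypothetical placeholder:

	/* write side: re-checked and updated under fs_info->super_lock */
	btrfs_set_fs_incompat(fs_info, SKINNY_METADATA);

	/* read side: lockless, the incompat flags are never cleared */
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		use_skinny_metadata();	/* hypothetical */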
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
* detected, that way the exact line number is reported.
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
struct btrfs_device *dev);
-int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
struct btrfs_scrub_progress *progress);
struct btrfs_fs_info *fs_info);
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-int btrfs_quota_rescan(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
spin_unlock(&root->lock);
}
-struct btrfs_delayed_node *btrfs_first_delayed_node(
+static struct btrfs_delayed_node *btrfs_first_delayed_node(
struct btrfs_delayed_root *delayed_root)
{
struct list_head *p;
return node;
}
-struct btrfs_delayed_node *btrfs_next_delayed_node(
+static struct btrfs_delayed_node *btrfs_next_delayed_node(
struct btrfs_delayed_node *node)
{
struct btrfs_delayed_root *delayed_root;
__btrfs_release_delayed_node(node, 0);
}
-struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
+static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
struct btrfs_delayed_root *delayed_root)
{
struct list_head *p;
__btrfs_release_delayed_node(node, 1);
}
-struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
struct btrfs_delayed_item *item;
item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
return NULL;
}
-struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
+static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node,
struct btrfs_key *key)
{
return item;
}
-struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
- struct btrfs_delayed_node *delayed_node,
- struct btrfs_key *key)
-{
- struct btrfs_delayed_item *item;
-
- item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
- NULL, NULL);
- return item;
-}
-
-struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
- struct btrfs_delayed_node *delayed_node,
- struct btrfs_key *key)
-{
- struct btrfs_delayed_item *item, *next;
-
- item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
- NULL, &next);
- if (!item)
- item = next;
-
- return item;
-}
-
-struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
- struct btrfs_delayed_node *delayed_node,
- struct btrfs_key *key)
-{
- struct btrfs_delayed_item *item, *next;
-
- item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
- NULL, &next);
- if (!item)
- item = next;
-
- return item;
-}
-
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
struct btrfs_delayed_item *ins,
int action)
}
}
-struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
+static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node)
{
struct rb_node *p;
return item;
}
-struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
+static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
struct btrfs_delayed_node *delayed_node)
{
struct rb_node *p;
return item;
}
-struct btrfs_delayed_item *__btrfs_next_delayed_item(
+static struct btrfs_delayed_item *__btrfs_next_delayed_item(
struct btrfs_delayed_item *item)
{
struct rb_node *p;
* This helper will insert some continuous items into the same leaf according
* to the free space of the leaf.
*/
-static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_delayed_item *item)
+static int btrfs_batch_insert_items(struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_item *item)
{
struct btrfs_delayed_item *curr, *next;
int free_space;
btrfs_clear_path_blocking(path, NULL, 0);
/* insert the keys of the items */
- setup_items_for_insert(trans, root, path, keys, data_size,
+ setup_items_for_insert(root, path, keys, data_size,
total_data_size, total_size, nitems);
/* insert the dir index items */
if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
/* insert the continuous items into the same leaf */
path->slots[0]++;
- btrfs_batch_insert_items(trans, root, path, curr);
+ btrfs_batch_insert_items(root, path, curr);
}
btrfs_release_delayed_item(prev);
btrfs_mark_buffer_dirty(path->nodes[0]);
* compare two delayed tree backrefs with same bytenr and type
*/
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
- struct btrfs_delayed_tree_ref *ref1)
+ struct btrfs_delayed_tree_ref *ref1, int type)
{
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
+ if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (ref1->root < ref2->root)
+ return -1;
+ if (ref1->root > ref2->root)
+ return 1;
+ } else {
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
+ }
return 0;
}
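A small made-up illustration of what the new type parameter changes: keyed
tree block refs are compared by root, shared block refs by parent, and the
other field is ignored (only the fields that matter here are initialized):

	struct btrfs_delayed_tree_ref a = { .root = 5, .parent = 100 };
	struct btrfs_delayed_tree_ref b = { .root = 7, .parent = 100 };

	comp_tree_refs(&b, &a, BTRFS_TREE_BLOCK_REF_KEY);	/* -1: roots differ */
	comp_tree_refs(&b, &a, BTRFS_SHARED_BLOCK_REF_KEY);	/*  0: parents match */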
if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
- btrfs_delayed_node_to_tree_ref(ref1));
+ btrfs_delayed_node_to_tree_ref(ref1),
+ ref1->type);
} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
elem = list_first_entry(&fs_info->tree_mod_seq_list,
struct seq_list, list);
if (seq >= elem->seq) {
- pr_debug("holding back delayed_ref %llu, lowest is "
- "%llu (%p)\n", seq, elem->seq, delayed_refs);
+ pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
+ (u32)(seq >> 32), (u32)seq,
+ (u32)(elem->seq >> 32), (u32)elem->seq,
+ delayed_refs);
ret = 1;
}
}
#include "hash.h"
#include "transaction.h"
+static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len);
+
/*
* insert a name into a directory, doing overflow properly if there is a hash
* collision. data_size indicates how big the item inserted should be. On
di = btrfs_match_dir_item_name(root, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
- btrfs_extend_item(trans, root, path, data_size);
+ btrfs_extend_item(root, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len)
{
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
- btrfs_truncate_item(trans, root, path,
- item_len - sub_item_len, 1);
+ btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
}
return ret;
}
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
+#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
struct extent_io_tree *pinned_extents);
+static int btrfs_cleanup_transaction(struct btrfs_root *root);
+static void btrfs_error_commit_super(struct btrfs_root *root);
/*
* end_io_wq structs are used to do processing in task context when an IO is
em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
+ ret = add_extent_mapping(em_tree, em, 0);
if (ret == -EEXIST) {
free_extent_map(em);
em = lookup_extent_mapping(em_tree, start, len);
return em;
}
-u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
+u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
return crc32c(seed, data, len);
}
if (err)
return 1;
cur_len = min(len, map_len - (offset - map_start));
- crc = btrfs_csum_data(root, kaddr + offset - map_start,
+ crc = btrfs_csum_data(kaddr + offset - map_start,
crc, cur_len);
len -= cur_len;
offset += cur_len;
return ret;
}
+/*
+ * Return 0 if the superblock checksum type matches the checksum value of that
+ * algorithm. Pass the raw disk superblock data.
+ */
+static int btrfs_check_super_csum(char *raw_disk_sb)
+{
+ struct btrfs_super_block *disk_sb =
+ (struct btrfs_super_block *)raw_disk_sb;
+ u16 csum_type = btrfs_super_csum_type(disk_sb);
+ int ret = 0;
+
+ if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
+ u32 crc = ~(u32)0;
+ const int csum_size = sizeof(crc);
+ char result[csum_size];
+
+ /*
+ * The super_block structure does not span the whole
+ * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
+ * is filled with zeros and is included in the checksum.
+ */
+ crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
+ crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
+ btrfs_csum_final(crc, result);
+
+ if (memcmp(raw_disk_sb, result, csum_size))
+ ret = 1;
+
+ if (ret && btrfs_super_generation(disk_sb) < 10) {
+ printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
+ ret = 0;
+ }
+ }
+
+ if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
+ printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
+ csum_type);
+ ret = 1;
+ }
+
+ return ret;
+}
+
/*
* helper to read a given tree block, doing retries as required when
* the checksums don't match and we have alternate mirrors to try.
return 0;
}
-struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
- struct page *page, int max_walk)
-{
- struct extent_buffer *eb;
- u64 start = page_offset(page);
- u64 target = start;
- u64 min_start;
-
- if (start < max_walk)
- min_start = 0;
- else
- min_start = start - max_walk;
-
- while (start >= min_start) {
- eb = find_extent_buffer(tree, start, 0);
- if (eb) {
- /*
- * we found an extent buffer and it contains our page
- * horray!
- */
- if (eb->start <= target &&
- eb->start + eb->len > target)
- return eb;
-
- /* we found an extent buffer that wasn't for us */
- free_extent_buffer(eb);
- return NULL;
- }
- if (start == 0)
- break;
- start -= PAGE_CACHE_SIZE;
- }
- return NULL;
-}
-
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror)
{
goto err;
}
found_level = btrfs_header_level(eb);
+ if (found_level >= BTRFS_MAX_LEVEL) {
+ btrfs_info(root->fs_info, "bad tree block level %d\n",
+ (int)btrfs_header_level(eb));
+ ret = -EIO;
+ goto err;
+ }
btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
eb, found_level);
if (!ret)
set_extent_buffer_uptodate(eb);
err:
- if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
- clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+ if (reads_done &&
+ test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
btree_readahead_hook(root, eb, eb->start, ret);
- }
if (ret) {
/*
{
if (PageWriteback(page) || PageDirty(page))
return 0;
- /*
- * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
- * slab allocation from alloc_extent_state down the callchain where
- * it'd hit a BUG_ON as those flags are not allowed.
- */
- gfp_flags &= ~GFP_SLAB_BUG_MASK;
- return try_release_extent_buffer(page, gfp_flags);
+ return try_release_extent_buffer(page);
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
struct btrfs_key key;
int ret = 0;
u64 bytenr;
+ uuid_le uuid;
root = btrfs_alloc_root(fs_info);
if (!root)
btrfs_set_root_used(&root->root_item, leaf->len);
btrfs_set_root_last_snapshot(&root->root_item, 0);
btrfs_set_root_dirid(&root->root_item, 0);
+ uuid_le_gen(&uuid);
+ memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
root->root_item.drop_level = 0;
key.objectid = objectid;
if (ret == 0) {
l = path->nodes[0];
slot = path->slots[0];
- btrfs_read_root_item(tree_root, l, slot, &root->root_item);
+ btrfs_read_root_item(l, slot, &root->root_item);
memcpy(&root->root_key, location, sizeof(*location));
}
btrfs_free_path(path);
blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
blocksize, generation);
+ if (!root->node || !extent_buffer_uptodate(root->node)) {
+ ret = (!root->node) ? -ENOMEM : -EIO;
+
+ free_extent_buffer(root->node);
+ kfree(root);
+ return ERR_PTR(ret);
+ }
+
root->commit_root = btrfs_root_node(root);
BUG_ON(!root->node); /* -ENOMEM */
out:
struct btrfs_root *root = arg;
do {
+ int again = 0;
+
if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
- mutex_trylock(&root->fs_info->cleaner_mutex)) {
- btrfs_run_delayed_iputs(root);
- btrfs_clean_old_snapshots(root);
- mutex_unlock(&root->fs_info->cleaner_mutex);
+ down_read_trylock(&root->fs_info->sb->s_umount)) {
+ if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
+ btrfs_run_delayed_iputs(root);
+ again = btrfs_clean_one_deleted_snapshot(root);
+ mutex_unlock(&root->fs_info->cleaner_mutex);
+ }
btrfs_run_defrag_inodes(root->fs_info);
+ up_read(&root->fs_info->sb->s_umount);
}
- if (!try_to_freeze()) {
+ if (!try_to_freeze() && !again) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
schedule();
return 0;
}
+/* helper to cleanup workers */
+static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
+{
+ btrfs_stop_workers(&fs_info->generic_worker);
+ btrfs_stop_workers(&fs_info->fixup_workers);
+ btrfs_stop_workers(&fs_info->delalloc_workers);
+ btrfs_stop_workers(&fs_info->workers);
+ btrfs_stop_workers(&fs_info->endio_workers);
+ btrfs_stop_workers(&fs_info->endio_meta_workers);
+ btrfs_stop_workers(&fs_info->endio_raid56_workers);
+ btrfs_stop_workers(&fs_info->rmw_workers);
+ btrfs_stop_workers(&fs_info->endio_meta_write_workers);
+ btrfs_stop_workers(&fs_info->endio_write_workers);
+ btrfs_stop_workers(&fs_info->endio_freespace_worker);
+ btrfs_stop_workers(&fs_info->submit_workers);
+ btrfs_stop_workers(&fs_info->delayed_workers);
+ btrfs_stop_workers(&fs_info->caching_workers);
+ btrfs_stop_workers(&fs_info->readahead_workers);
+ btrfs_stop_workers(&fs_info->flush_workers);
+ btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
+}
+
/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
}
}
+static void del_fs_roots(struct btrfs_fs_info *fs_info)
+{
+ int ret;
+ struct btrfs_root *gang[8];
+ int i;
+
+ while (!list_empty(&fs_info->dead_roots)) {
+ gang[0] = list_entry(fs_info->dead_roots.next,
+ struct btrfs_root, root_list);
+ list_del(&gang[0]->root_list);
+
+ if (gang[0]->in_radix) {
+ btrfs_free_fs_root(fs_info, gang[0]);
+ } else {
+ free_extent_buffer(gang[0]->node);
+ free_extent_buffer(gang[0]->commit_root);
+ kfree(gang[0]);
+ }
+ }
+
+ while (1) {
+ ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ (void **)gang, 0,
+ ARRAY_SIZE(gang));
+ if (!ret)
+ break;
+ for (i = 0; i < ret; i++)
+ btrfs_free_fs_root(fs_info, gang[i]);
+ }
+}
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->free_chunk_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
+ spin_lock_init(&fs_info->super_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->reloc_mutex);
seqlock_init(&fs_info->profiles_lock);
atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->defrag_running, 0);
- atomic_set(&fs_info->tree_mod_seq, 0);
+ atomic64_set(&fs_info->tree_mod_seq, 0);
fs_info->sb = sb;
fs_info->max_inline = 8192 * 1024;
fs_info->metadata_ratio = 0;
mutex_init(&fs_info->dev_replace.lock);
spin_lock_init(&fs_info->qgroup_lock);
+ mutex_init(&fs_info->qgroup_ioctl_lock);
fs_info->qgroup_tree = RB_ROOT;
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
fs_info->qgroup_seq = 1;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
+ mutex_init(&fs_info->qgroup_rescan_lock);
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
fs_info, BTRFS_ROOT_TREE_OBJECTID);
invalidate_bdev(fs_devices->latest_bdev);
+
+ /*
+ * Read super block and check the signature bytes only
+ */
bh = btrfs_read_dev_super(fs_devices->latest_bdev);
if (!bh) {
err = -EINVAL;
goto fail_alloc;
}
+ /*
+ * We want to check the superblock checksum; the checksum type is stored inside.
+ * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
+ */
+ if (btrfs_check_super_csum(bh->b_data)) {
+ printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+
+ /*
+ * super_copy is zeroed at allocation time and we never touch the
+ * following bytes up to INFO_SIZE; the checksum is calculated from
+ * the whole block of INFO_SIZE.
+ */
memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
memcpy(fs_info->super_for_commit, fs_info->super_copy,
sizeof(*fs_info->super_for_commit));
memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
+ ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+ if (ret) {
+ printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+
disk_super = fs_info->super_copy;
if (!btrfs_super_root(disk_super))
goto fail_alloc;
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
- ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
- if (ret) {
- printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
- err = ret;
- goto fail_alloc;
- }
-
/*
* run through our array of backup supers and setup
* our ring pointer to the oldest one
if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+ if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+ printk(KERN_ERR "btrfs: has skinny extents\n");
+
/*
* flag our filesystem as having big metadata blocks if
* they are bigger than the page size
goto fail_alloc;
}
+ /*
+ * No need to take the lock here because there is no other task that
+ * can update the flags at this point.
+ */
btrfs_set_super_incompat_flags(disk_super, features);
features = btrfs_super_compat_ro_flags(disk_super) &
btrfs_init_workers(&fs_info->readahead_workers, "readahead",
fs_info->thread_pool_size,
&fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
+ &fs_info->generic_worker);
/*
* endios are largely parallel and should have a very
ret |= btrfs_start_workers(&fs_info->caching_workers);
ret |= btrfs_start_workers(&fs_info->readahead_workers);
ret |= btrfs_start_workers(&fs_info->flush_workers);
+ ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
if (ret) {
err = -ENOMEM;
goto fail_sb_buffer;
chunk_root->node = read_tree_block(chunk_root,
btrfs_super_chunk_root(disk_super),
blocksize, generation);
- BUG_ON(!chunk_root->node); /* -ENOMEM */
- if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+ if (!chunk_root->node ||
+ !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
sb->s_id);
goto fail_tree_roots;
log_tree_root->node = read_tree_block(tree_root, bytenr,
blocksize,
generation + 1);
+ if (!log_tree_root->node ||
+ !extent_buffer_uptodate(log_tree_root->node)) {
+ printk(KERN_ERR "btrfs: failed to read log tree\n");
+ free_extent_buffer(log_tree_root->node);
+ kfree(log_tree_root);
+ goto fail_trans_kthread;
+ }
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
if (ret) {
btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
+ del_fs_roots(fs_info);
+ btrfs_cleanup_transaction(fs_info->tree_root);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
fail_block_groups:
+ btrfs_put_block_group_cache(fs_info);
btrfs_free_block_groups(fs_info);
fail_tree_roots:
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_sb_buffer:
- btrfs_stop_workers(&fs_info->generic_worker);
- btrfs_stop_workers(&fs_info->readahead_workers);
- btrfs_stop_workers(&fs_info->fixup_workers);
- btrfs_stop_workers(&fs_info->delalloc_workers);
- btrfs_stop_workers(&fs_info->workers);
- btrfs_stop_workers(&fs_info->endio_workers);
- btrfs_stop_workers(&fs_info->endio_meta_workers);
- btrfs_stop_workers(&fs_info->endio_raid56_workers);
- btrfs_stop_workers(&fs_info->rmw_workers);
- btrfs_stop_workers(&fs_info->endio_meta_write_workers);
- btrfs_stop_workers(&fs_info->endio_write_workers);
- btrfs_stop_workers(&fs_info->endio_freespace_worker);
- btrfs_stop_workers(&fs_info->submit_workers);
- btrfs_stop_workers(&fs_info->delayed_workers);
- btrfs_stop_workers(&fs_info->caching_workers);
- btrfs_stop_workers(&fs_info->flush_workers);
+ btrfs_stop_all_workers(fs_info);
fail_alloc:
fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
if (wait) {
bh = __find_get_block(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
- BUG_ON(!bh);
+ if (!bh) {
+ errors++;
+ continue;
+ }
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
errors++;
btrfs_set_super_bytenr(sb, bytenr);
crc = ~(u32)0;
- crc = btrfs_csum_data(NULL, (char *)sb +
+ crc = btrfs_csum_data((char *)sb +
BTRFS_CSUM_SIZE, crc,
BTRFS_SUPER_INFO_SIZE -
BTRFS_CSUM_SIZE);
*/
bh = __getblk(device->bdev, bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
+ if (!bh) {
+ printk(KERN_ERR "btrfs: couldn't get super "
+ "buffer head for bytenr %Lu\n", bytenr);
+ errors++;
+ continue;
+ }
+
memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
/* one reference for submit_bh */
return num_tolerated_disk_barrier_failures;
}
-int write_all_supers(struct btrfs_root *root, int max_mirrors)
+static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
struct list_head *head;
struct btrfs_device *dev;
kfree(root);
}
-static void del_fs_roots(struct btrfs_fs_info *fs_info)
-{
- int ret;
- struct btrfs_root *gang[8];
- int i;
-
- while (!list_empty(&fs_info->dead_roots)) {
- gang[0] = list_entry(fs_info->dead_roots.next,
- struct btrfs_root, root_list);
- list_del(&gang[0]->root_list);
-
- if (gang[0]->in_radix) {
- btrfs_free_fs_root(fs_info, gang[0]);
- } else {
- free_extent_buffer(gang[0]->node);
- free_extent_buffer(gang[0]->commit_root);
- kfree(gang[0]);
- }
- }
-
- while (1) {
- ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
- (void **)gang, 0,
- ARRAY_SIZE(gang));
- if (!ret)
- break;
- for (i = 0; i < ret; i++)
- btrfs_free_fs_root(fs_info, gang[i]);
- }
-}
-
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
u64 root_objectid = 0;
mutex_lock(&root->fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
- btrfs_clean_old_snapshots(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
+ wake_up_process(root->fs_info->cleaner_kthread);
/* wait until ongoing cleanup work done */
down_write(&root->fs_info->cleanup_work_sem);
percpu_counter_sum(&fs_info->delalloc_bytes));
}
- free_extent_buffer(fs_info->extent_root->node);
- free_extent_buffer(fs_info->extent_root->commit_root);
- free_extent_buffer(fs_info->tree_root->node);
- free_extent_buffer(fs_info->tree_root->commit_root);
- free_extent_buffer(fs_info->chunk_root->node);
- free_extent_buffer(fs_info->chunk_root->commit_root);
- free_extent_buffer(fs_info->dev_root->node);
- free_extent_buffer(fs_info->dev_root->commit_root);
- free_extent_buffer(fs_info->csum_root->node);
- free_extent_buffer(fs_info->csum_root->commit_root);
- if (fs_info->quota_root) {
- free_extent_buffer(fs_info->quota_root->node);
- free_extent_buffer(fs_info->quota_root->commit_root);
- }
+ free_root_pointers(fs_info, 1);
btrfs_free_block_groups(fs_info);
iput(fs_info->btree_inode);
- btrfs_stop_workers(&fs_info->generic_worker);
- btrfs_stop_workers(&fs_info->fixup_workers);
- btrfs_stop_workers(&fs_info->delalloc_workers);
- btrfs_stop_workers(&fs_info->workers);
- btrfs_stop_workers(&fs_info->endio_workers);
- btrfs_stop_workers(&fs_info->endio_meta_workers);
- btrfs_stop_workers(&fs_info->endio_raid56_workers);
- btrfs_stop_workers(&fs_info->rmw_workers);
- btrfs_stop_workers(&fs_info->endio_meta_write_workers);
- btrfs_stop_workers(&fs_info->endio_write_workers);
- btrfs_stop_workers(&fs_info->endio_freespace_worker);
- btrfs_stop_workers(&fs_info->submit_workers);
- btrfs_stop_workers(&fs_info->delayed_workers);
- btrfs_stop_workers(&fs_info->caching_workers);
- btrfs_stop_workers(&fs_info->readahead_workers);
- btrfs_stop_workers(&fs_info->flush_workers);
+ btrfs_stop_all_workers(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(root, CHECK_INTEGRITY))
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only)
{
- if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
- printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
- return -EINVAL;
- }
-
- if (read_only)
- return 0;
-
+ /*
+ * Placeholder for checks
+ */
return 0;
}
-void btrfs_error_commit_super(struct btrfs_root *root)
+static void btrfs_error_commit_super(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
continue;
}
+ if (head->must_insert_reserved)
+ btrfs_pin_extent(root, ref->bytenr,
+ ref->num_bytes, 1);
btrfs_free_delayed_extent_op(head->extent_op);
delayed_refs->num_heads--;
if (list_empty(&head->cluster))
int mark)
{
int ret;
- struct page *page;
- struct inode *btree_inode = root->fs_info->btree_inode;
struct extent_buffer *eb;
u64 start = 0;
u64 end;
- u64 offset;
- unsigned long index;
while (1) {
ret = find_first_extent_bit(dirty_pages, start, &start, &end,
clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
while (start <= end) {
- index = start >> PAGE_CACHE_SHIFT;
- start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
- page = find_get_page(btree_inode->i_mapping, index);
- if (!page)
+ eb = btrfs_find_tree_block(root, start,
+ root->leafsize);
+ start += root->leafsize;
+ if (!eb)
continue;
- offset = page_offset(page);
-
- spin_lock(&dirty_pages->buffer_lock);
- eb = radix_tree_lookup(
- &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
- offset >> PAGE_CACHE_SHIFT);
- spin_unlock(&dirty_pages->buffer_lock);
- if (eb)
- ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
- &eb->bflags);
- if (PageWriteback(page))
- end_page_writeback(page);
-
- lock_page(page);
- if (PageDirty(page)) {
- clear_page_dirty_for_io(page);
- spin_lock_irq(&page->mapping->tree_lock);
- radix_tree_tag_clear(&page->mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- spin_unlock_irq(&page->mapping->tree_lock);
- }
+ wait_on_extent_buffer_writeback(eb);
- unlock_page(page);
- page_cache_release(page);
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+ &eb->bflags))
+ clear_extent_buffer_dirty(eb);
+ free_extent_buffer_stale(eb);
}
}
*/
}
-int btrfs_cleanup_transaction(struct btrfs_root *root)
+static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
struct btrfs_transaction *t;
LIST_HEAD(list);
btrfs_destroy_delayed_refs(t, root);
- btrfs_block_rsv_release(root,
- &root->fs_info->trans_block_rsv,
- t->dirty_pages.dirty_bytes);
-
/* FIXME: cleanup wait for commit */
t->in_commit = 1;
t->blocked = 1;
struct btrfs_root *root, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_commit_super(struct btrfs_root *root);
-void btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
int atomic);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
-u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
+u32 btrfs_csum_data(char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata);
struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_cleanup_transaction(struct btrfs_root *root);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
-void btrfs_abort_devices(struct btrfs_root *root);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 objectid);
u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
+int btrfs_pin_extent(struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, int reserved);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
return ret;
while (nr--) {
- cache->bytes_super += stripe_len;
- ret = add_excluded_extent(root, logical[nr],
- stripe_len);
+ u64 start, len;
+
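+ /*
+ * Only count the portion of this super stripe that actually falls
+ * inside the block group; stripes entirely outside of it are skipped.
+ */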
+ if (logical[nr] > cache->key.objectid +
+ cache->key.offset)
+ continue;
+
+ if (logical[nr] + stripe_len <= cache->key.objectid)
+ continue;
+
+ start = logical[nr];
+ if (start < cache->key.objectid) {
+ start = cache->key.objectid;
+ len = (logical[nr] + stripe_len) - start;
+ } else {
+ len = min_t(u64, stripe_len,
+ cache->key.objectid +
+ cache->key.offset - start);
+ }
+
+ cache->bytes_super += len;
+ ret = add_excluded_extent(root, start, len);
if (ret) {
kfree(logical);
return ret;
if (ret)
break;
- if (need_resched() ||
- btrfs_next_leaf(extent_root, path)) {
+ if (need_resched()) {
caching_ctl->progress = last;
btrfs_release_path(path);
up_read(&fs_info->extent_commit_sem);
cond_resched();
goto again;
}
+
+ ret = btrfs_next_leaf(extent_root, path);
+ if (ret < 0)
+ goto err;
+ if (ret)
+ break;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
continue;
block_group->key.offset)
break;
- if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+ if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY) {
total_found += add_new_free_space(block_group,
fs_info, last,
key.objectid);
- last = key.objectid + key.offset;
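+ /*
+ * METADATA_ITEM keys carry the block level in the offset rather
+ * than the extent size, so advance by the tree block size instead.
+ */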
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ last = key.objectid +
+ fs_info->tree_root->leafsize;
+ else
+ last = key.objectid + key.offset;
if (total_found > (1024 * 1024 * 2)) {
total_found = 0;
rcu_read_unlock();
}
-u64 btrfs_find_block_group(struct btrfs_root *root,
- u64 search_start, u64 search_hint, int owner)
-{
- struct btrfs_block_group_cache *cache;
- u64 used;
- u64 last = max(search_hint, search_start);
- u64 group_start = 0;
- int full_search = 0;
- int factor = 9;
- int wrapped = 0;
-again:
- while (1) {
- cache = btrfs_lookup_first_block_group(root->fs_info, last);
- if (!cache)
- break;
-
- spin_lock(&cache->lock);
- last = cache->key.objectid + cache->key.offset;
- used = btrfs_block_group_used(&cache->item);
-
- if ((full_search || !cache->ro) &&
- block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
- if (used + cache->pinned + cache->reserved <
- div_factor(cache->key.offset, factor)) {
- group_start = cache->key.objectid;
- spin_unlock(&cache->lock);
- btrfs_put_block_group(cache);
- goto found;
- }
- }
- spin_unlock(&cache->lock);
- btrfs_put_block_group(cache);
- cond_resched();
- }
- if (!wrapped) {
- last = search_start;
- wrapped = 1;
- goto again;
- }
- if (!full_search && factor < 10) {
- last = search_start;
- full_search = 1;
- factor = 10;
- goto again;
- }
-found:
- return group_start;
-}
-
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
key.objectid = start;
key.offset = len;
- btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+ key.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
0, 0);
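+ /*
+ * With skinny metadata a tree block is keyed as
+ * (bytenr, BTRFS_METADATA_ITEM_KEY, level), so an exact
+ * EXTENT_ITEM match may fail even though the extent exists.
+ */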
+ if (ret > 0) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (key.objectid == start &&
+ key.type == BTRFS_METADATA_ITEM_KEY)
+ ret = 0;
+ }
btrfs_free_path(path);
return ret;
}
/*
- * helper function to lookup reference count and flags of extent.
+ * helper function to lookup reference count and flags of a tree block.
*
* the head node for delayed ref is used to store the sum of all the
* reference count modifications queued up in the rbtree. the head
*/
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
- u64 num_bytes, u64 *refs, u64 *flags)
+ u64 offset, int metadata, u64 *refs, u64 *flags)
{
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
u64 extent_flags;
int ret;
+ /*
+ * If we don't have skinny metadata, don't bother doing anything
+ * different
+ */
+ if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
+ offset = root->leafsize;
+ metadata = 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- key.objectid = bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = num_bytes;
+ if (metadata) {
+ key.objectid = bytenr;
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = offset;
+ } else {
+ key.objectid = bytenr;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = offset;
+ }
+
if (!trans) {
path->skip_locking = 1;
path->search_commit_root = 1;
if (ret < 0)
goto out_free;
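+ /*
+ * No skinny METADATA_ITEM was found; retry the lookup with the
+ * old-style EXTENT_ITEM keyed by the tree block size.
+ */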
+ if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = root->leafsize;
+ btrfs_release_path(path);
+ goto again;
+ }
+
if (ret == 0) {
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
return ret;
BUG_ON(ret); /* Corruption */
- btrfs_extend_item(trans, root, path, new_size);
+ btrfs_extend_item(root, path, new_size);
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
int want;
int ret;
int err = 0;
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
path->keep_locks = 1;
} else
extra_size = -1;
+
+ /*
+ * Owner is our parent level, so we can just add one to get the level
+ * for the block we are interested in.
+ */
+ if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = owner;
+ }
+
+again:
ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
if (ret < 0) {
err = ret;
goto out;
}
+
+ /*
+ * We may be a newly converted file system which still has the old fat
+ * extent entries for metadata, so try and see if we have one of those.
+ */
+ if (ret > 0 && skinny_metadata) {
+ skinny_metadata = false;
+ if (path->slots[0]) {
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(path->nodes[0], &key,
+ path->slots[0]);
+ if (key.objectid == bytenr &&
+ key.type == BTRFS_EXTENT_ITEM_KEY &&
+ key.offset == num_bytes)
+ ret = 0;
+ }
+ if (ret) {
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = num_bytes;
+ btrfs_release_path(path);
+ goto again;
+ }
+ }
+
if (ret && !insert) {
err = -ENOENT;
goto out;
ptr = (unsigned long)(ei + 1);
end = (unsigned long)ei + item_size;
- if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
ptr += sizeof(struct btrfs_tree_block_info);
BUG_ON(ptr > end);
- } else {
- BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
}
err = -ENOENT;
* helper to add new inline back ref
*/
static noinline_for_stack
-void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+void setup_inline_extent_backref(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
u64 parent, u64 root_objectid,
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- btrfs_extend_item(trans, root, path, size);
+ btrfs_extend_item(root, path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
* helper to update/remove inline back ref
*/
static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+void update_inline_extent_backref(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- btrfs_truncate_item(trans, root, path, item_size, 1);
+ btrfs_truncate_item(root, path, item_size, 1);
}
btrfs_mark_buffer_dirty(leaf);
}
root_objectid, owner, offset, 1);
if (ret == 0) {
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
- update_inline_extent_backref(trans, root, path, iref,
+ update_inline_extent_backref(root, path, iref,
refs_to_add, extent_op);
} else if (ret == -ENOENT) {
- setup_inline_extent_backref(trans, root, path, iref, parent,
+ setup_inline_extent_backref(root, path, iref, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
ret = 0;
BUG_ON(!is_data && refs_to_drop != 1);
if (iref) {
- update_inline_extent_backref(trans, root, path, iref,
+ update_inline_extent_backref(root, path, iref,
-refs_to_drop, NULL);
} else if (is_data) {
ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
ref_root = ref->root;
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
- if (extent_op) {
- BUG_ON(extent_op->update_key);
+ if (extent_op)
flags |= extent_op->flags_to_set;
- }
ret = alloc_reserved_file_extent(trans, root,
parent, ref_root, flags,
ref->objectid, ref->offset,
u32 item_size;
int ret;
int err = 0;
+ int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ node->type == BTRFS_SHARED_BLOCK_REF_KEY);
if (trans->aborted)
return 0;
+ if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+ metadata = 0;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
key.objectid = node->bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = node->num_bytes;
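+ /*
+ * Skinny metadata extents are keyed by (bytenr, METADATA_ITEM, level);
+ * everything else keeps the (bytenr, EXTENT_ITEM, num_bytes) key.
+ */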
+ if (metadata) {
+ struct btrfs_delayed_tree_ref *tree_ref;
+
+ tree_ref = btrfs_delayed_node_to_tree_ref(node);
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = tree_ref->level;
+ } else {
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = node->num_bytes;
+ }
+
+again:
path->reada = 1;
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
goto out;
}
if (ret > 0) {
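+ /*
+ * The METADATA_ITEM wasn't found; the block may still be recorded
+ * as an old-style EXTENT_ITEM, so retry the search once with that key.
+ */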
+ if (metadata) {
+ btrfs_release_path(path);
+ metadata = 0;
+
+ key.offset = node->num_bytes;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ goto again;
+ }
err = -EIO;
goto out;
}
struct btrfs_key ins;
u64 parent = 0;
u64 ref_root = 0;
-
- ins.objectid = node->bytenr;
- ins.offset = node->num_bytes;
- ins.type = BTRFS_EXTENT_ITEM_KEY;
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
ref = btrfs_delayed_node_to_tree_ref(node);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
else
ref_root = ref->root;
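+ /* With skinny metadata the level, not the byte count, goes in the key offset. */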
+ ins.objectid = node->bytenr;
+ if (skinny_metadata) {
+ ins.offset = ref->level;
+ ins.type = BTRFS_METADATA_ITEM_KEY;
+ } else {
+ ins.offset = node->num_bytes;
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+ }
+
BUG_ON(node->ref_mod != 1);
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
- BUG_ON(!extent_op || !extent_op->update_flags ||
- !extent_op->update_key);
+ BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, root,
parent, ref_root,
extent_op->flags_to_set,
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- printk(KERN_DEBUG
- "btrfs: run_delayed_extent_op "
- "returned %d\n", ret);
+ btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
spin_lock(&delayed_refs->lock);
btrfs_delayed_ref_unlock(locked_ref);
return ret;
if (ret) {
btrfs_delayed_ref_unlock(locked_ref);
btrfs_put_delayed_ref(ref);
- printk(KERN_DEBUG
- "btrfs: run_one_delayed_ref returned %d\n", ret);
+ btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
spin_lock(&delayed_refs->lock);
return ret;
}
if (list_empty(&trans->qgroup_ref_list) !=
!trans->delayed_ref_elem.seq) {
/* list without seq or seq without list */
- printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
+ btrfs_err(fs_info,
+ "qgroup accounting update error, list is%s empty, seq is %#x.%x",
list_empty(&trans->qgroup_ref_list) ? "" : " not",
- trans->delayed_ref_elem.seq);
+ (u32)(trans->delayed_ref_elem.seq >> 32),
+ (u32)trans->delayed_ref_elem.seq);
BUG();
}
* progress (either running or paused) picks the target profile (if it's
* already available), otherwise falls back to plain reducing.
*/
-u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
/*
* we add in the count of missing devices because we want
rcu_read_unlock();
}
+static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
+{
+ return (global->size << 1);
+}
+
static int should_alloc_chunk(struct btrfs_root *root,
struct btrfs_space_info *sinfo, int force)
{
* global_rsv, it doesn't change except when the transaction commits.
*/
if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
- num_allocated += global_rsv->size;
+ num_allocated += calc_global_rsv_need_space(global_rsv);
/*
* in limited mode, we want to have some free space up to
thresh = get_system_chunk_thresh(root, type);
if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
- printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
- left, thresh, type);
+ btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
+ left, thresh, type);
dump_space_info(info, 0, 0);
}
{
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
u64 profile = btrfs_get_alloc_profile(root, 0);
- u64 rsv_size = 0;
+ u64 space_size;
u64 avail;
u64 used;
u64 to_add;
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly;
- spin_lock(&global_rsv->lock);
- rsv_size = global_rsv->size;
- spin_unlock(&global_rsv->lock);
-
/*
* We only want to allow over committing if we have lots of actual space
* free, but if we don't have enough space to handle the global reserve
* space then we could end up having a real enospc problem when trying
* to allocate a chunk or some other such important allocation.
*/
- rsv_size <<= 1;
- if (used + rsv_size >= space_info->total_bytes)
+ spin_lock(&global_rsv->lock);
+ space_size = calc_global_rsv_need_space(global_rsv);
+ spin_unlock(&global_rsv->lock);
+ if (used + space_size >= space_info->total_bytes)
return 0;
used += space_info->bytes_may_use;
return 0;
}
-void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
- unsigned long nr_pages)
+static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+ unsigned long nr_pages)
{
struct super_block *sb = root->fs_info->sb;
int started;
* the disk).
*/
btrfs_start_delalloc_inodes(root, 0);
- btrfs_wait_ordered_extents(root, 0);
+ if (!current->journal_info)
+ btrfs_wait_ordered_extents(root, 0);
}
}
u64 bytenr, u64 num_bytes)
{
struct btrfs_block_group_cache *cache;
+ int ret;
cache = btrfs_lookup_block_group(root->fs_info, bytenr);
- BUG_ON(!cache); /* Logic error */
+ if (!cache)
+ return -EINVAL;
/*
* pull in the free space cache (if any) so that our pin
pin_down_extent(root, cache, bytenr, num_bytes, 0);
/* remove us from the free space cache (if we're there at all) */
- btrfs_remove_free_space(cache, bytenr, num_bytes);
+ ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
btrfs_put_block_group(cache);
- return 0;
+ return ret;
}
/**
int num_to_del = 1;
u32 item_size;
u64 refs;
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
path = btrfs_alloc_path();
if (!path)
is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
BUG_ON(!is_data && refs_to_drop != 1);
+ if (is_data)
+ skinny_metadata = 0;
+
ret = lookup_extent_backref(trans, extent_root, path, &iref,
bytenr, num_bytes, parent,
root_objectid, owner_objectid,
found_extent = 1;
break;
}
+ if (key.type == BTRFS_METADATA_ITEM_KEY &&
+ key.offset == owner_objectid) {
+ found_extent = 1;
+ break;
+ }
if (path->slots[0] - extent_slot > 5)
break;
extent_slot--;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
+ if (!is_data && skinny_metadata) {
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = owner_objectid;
+ }
+
ret = btrfs_search_slot(trans, extent_root,
&key, path, -1, 1);
+ if (ret > 0 && skinny_metadata && path->slots[0]) {
+ /*
+ * Couldn't find our skinny metadata item,
+ * see if we have ye olde extent item.
+ */
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(path->nodes[0], &key,
+ path->slots[0]);
+ if (key.objectid == bytenr &&
+ key.type == BTRFS_EXTENT_ITEM_KEY &&
+ key.offset == num_bytes)
+ ret = 0;
+ }
+
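+ /*
+ * Still no luck with the skinny key; fall back to a plain
+ * EXTENT_ITEM search before giving up.
+ */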
+ if (ret > 0 && skinny_metadata) {
+ skinny_metadata = false;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = num_bytes;
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(trans, extent_root,
+ &key, path, -1, 1);
+ }
+
if (ret) {
- printk(KERN_ERR "umm, got %d back from search"
- ", was looking for %llu\n", ret,
- (unsigned long long)bytenr);
+ btrfs_err(info, "umm, got %d back from search, was looking for %llu",
+ ret, (unsigned long long)bytenr);
if (ret > 0)
btrfs_print_leaf(extent_root,
path->nodes[0]);
} else if (ret == -ENOENT) {
btrfs_print_leaf(extent_root, path->nodes[0]);
WARN_ON(1);
- printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
- "parent %llu root %llu owner %llu offset %llu\n",
- (unsigned long long)bytenr,
- (unsigned long long)parent,
- (unsigned long long)root_objectid,
- (unsigned long long)owner_objectid,
- (unsigned long long)owner_offset);
+ btrfs_err(info,
+ "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
+ (unsigned long long)bytenr,
+ (unsigned long long)parent,
+ (unsigned long long)root_objectid,
+ (unsigned long long)owner_objectid,
+ (unsigned long long)owner_offset);
} else {
btrfs_abort_transaction(trans, extent_root, ret);
goto out;
ret = btrfs_search_slot(trans, extent_root, &key, path,
-1, 1);
if (ret) {
- printk(KERN_ERR "umm, got %d back from search"
- ", was looking for %llu\n", ret,
- (unsigned long long)bytenr);
+ btrfs_err(info, "umm, got %d back from search, was looking for %llu",
+ ret, (unsigned long long)bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]);
}
if (ret < 0) {
BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, extent_slot,
struct btrfs_extent_item);
- if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
+ if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
+ key.type == BTRFS_EXTENT_ITEM_KEY) {
struct btrfs_tree_block_info *bi;
BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
bi = (struct btrfs_tree_block_info *)(ei + 1);
}
refs = btrfs_extent_refs(leaf, ei);
- BUG_ON(refs < refs_to_drop);
+ if (refs < refs_to_drop) {
+ btrfs_err(info, "trying to drop %d refs but we only have %Lu "
+ "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
+ ret = -EINVAL;
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
refs -= refs_to_drop;
if (refs > 0) {
struct btrfs_root *orig_root,
u64 num_bytes, u64 empty_size,
u64 hint_byte, struct btrfs_key *ins,
- u64 data)
+ u64 flags)
{
int ret = 0;
struct btrfs_root *root = orig_root->fs_info->extent_root;
int empty_cluster = 2 * 1024 * 1024;
struct btrfs_space_info *space_info;
int loop = 0;
- int index = __get_raid_index(data);
- int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
+ int index = __get_raid_index(flags);
+ int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
bool found_uncached_bg = false;
bool failed_cluster_refill = false;
ins->objectid = 0;
ins->offset = 0;
- trace_find_free_extent(orig_root, num_bytes, empty_size, data);
+ trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
- space_info = __find_space_info(root->fs_info, data);
+ space_info = __find_space_info(root->fs_info, flags);
if (!space_info) {
- printk(KERN_ERR "No space info for %llu\n", data);
+ btrfs_err(root->fs_info, "No space info for %llu", flags);
return -ENOSPC;
}
if (btrfs_mixed_space_info(space_info))
use_cluster = false;
- if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
+ if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
last_ptr = &root->fs_info->meta_alloc_cluster;
if (!btrfs_test_opt(root, SSD))
empty_cluster = 64 * 1024;
}
- if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
+ if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
btrfs_test_opt(root, SSD)) {
last_ptr = &root->fs_info->data_alloc_cluster;
}
* However if we are re-searching with an ideal block group
* picked out then we don't care that the block group is cached.
*/
- if (block_group && block_group_bits(block_group, data) &&
+ if (block_group && block_group_bits(block_group, flags) &&
block_group->cached != BTRFS_CACHE_NO) {
down_read(&space_info->groups_sem);
if (list_empty(&block_group->list) ||
* raid types, but we want to make sure we only allocate
* for the proper type.
*/
- if (!block_group_bits(block_group, data)) {
+ if (!block_group_bits(block_group, flags)) {
u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID5 |
* doesn't provide them, bail. This does allow us to
* fill raid0 from raid1.
*/
- if ((data & extra) && !(block_group->flags & extra))
+ if ((flags & extra) && !(block_group->flags & extra))
goto loop;
}
if (used_block_group != block_group &&
(!used_block_group ||
used_block_group->ro ||
- !block_group_bits(used_block_group, data))) {
+ !block_group_bits(used_block_group, flags))) {
used_block_group = block_group;
goto refill_cluster;
}
index = 0;
loop++;
if (loop == LOOP_ALLOC_CHUNK) {
- ret = do_chunk_alloc(trans, root, data,
+ ret = do_chunk_alloc(trans, root, flags,
CHUNK_ALLOC_FORCE);
/*
* Do not bail out on ENOSPC since we
struct btrfs_root *root,
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
- struct btrfs_key *ins, u64 data)
+ struct btrfs_key *ins, int is_data)
{
bool final_tried = false;
+ u64 flags;
int ret;
- data = btrfs_get_alloc_profile(root, data);
+ flags = btrfs_get_alloc_profile(root, is_data);
again:
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
- hint_byte, ins, data);
+ hint_byte, ins, flags);
if (ret == -ENOSPC) {
if (!final_tried) {
} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
- sinfo = __find_space_info(root->fs_info, data);
- printk(KERN_ERR "btrfs allocation failed flags %llu, "
- "wanted %llu\n", (unsigned long long)data,
- (unsigned long long)num_bytes);
+ sinfo = __find_space_info(root->fs_info, flags);
+ btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
+ (unsigned long long)flags,
+ (unsigned long long)num_bytes);
if (sinfo)
dump_space_info(sinfo, num_bytes, 1);
}
cache = btrfs_lookup_block_group(root->fs_info, start);
if (!cache) {
- printk(KERN_ERR "Unable to find block group for %llu\n",
- (unsigned long long)start);
+ btrfs_err(root->fs_info, "Unable to find block group for %llu",
+ (unsigned long long)start);
return -ENOSPC;
}
ret = update_block_group(root, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
- printk(KERN_ERR "btrfs update block group failed for %llu "
- "%llu\n", (unsigned long long)ins->objectid,
- (unsigned long long)ins->offset);
+ btrfs_err(fs_info, "update block group failed for %llu %llu",
+ (unsigned long long)ins->objectid,
+ (unsigned long long)ins->offset);
BUG();
}
return ret;
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
struct extent_buffer *leaf;
- u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
+ u32 size = sizeof(*extent_item) + sizeof(*iref);
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
+
+ if (!skinny_metadata)
+ size += sizeof(*block_info);
path = btrfs_alloc_path();
if (!path)
btrfs_set_extent_generation(leaf, extent_item, trans->transid);
btrfs_set_extent_flags(leaf, extent_item,
flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
- block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
- btrfs_set_tree_block_key(leaf, block_info, key);
- btrfs_set_tree_block_level(leaf, block_info, level);
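+ /*
+ * Skinny metadata items carry no btrfs_tree_block_info; the inline
+ * backref follows the extent item directly.
+ */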
+ if (skinny_metadata) {
+ iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
+ } else {
+ block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
+ btrfs_set_tree_block_key(leaf, block_info, key);
+ btrfs_set_tree_block_level(leaf, block_info, level);
+ iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
+ }
- iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
if (parent > 0) {
BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
btrfs_set_extent_inline_ref_type(leaf, iref,
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
- ret = update_block_group(root, ins->objectid, ins->offset, 1);
+ ret = update_block_group(root, ins->objectid, root->leafsize, 1);
if (ret) { /* -ENOENT, logic error */
- printk(KERN_ERR "btrfs update block group failed for %llu "
- "%llu\n", (unsigned long long)ins->objectid,
- (unsigned long long)ins->offset);
+ btrfs_err(fs_info, "update block group failed for %llu %llu",
+ (unsigned long long)ins->objectid,
+ (unsigned long long)ins->offset);
BUG();
}
return ret;
if (!caching_ctl) {
BUG_ON(!block_group_cache_done(block_group));
ret = btrfs_remove_free_space(block_group, start, num_bytes);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ goto out;
} else {
mutex_lock(&caching_ctl->mutex);
if (start >= caching_ctl->progress) {
ret = add_excluded_extent(root, start, num_bytes);
- BUG_ON(ret); /* -ENOMEM */
} else if (start + num_bytes <= caching_ctl->progress) {
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
- BUG_ON(ret); /* -ENOMEM */
} else {
num_bytes = caching_ctl->progress - start;
ret = btrfs_remove_free_space(block_group,
start, num_bytes);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ goto out_lock;
start = caching_ctl->progress;
num_bytes = ins->objectid + ins->offset -
caching_ctl->progress;
ret = add_excluded_extent(root, start, num_bytes);
- BUG_ON(ret); /* -ENOMEM */
}
-
+out_lock:
mutex_unlock(&caching_ctl->mutex);
put_caching_control(caching_ctl);
+ if (ret)
+ goto out;
}
ret = btrfs_update_reserved_bytes(block_group, ins->offset,
RESERVE_ALLOC_NO_ACCOUNT);
BUG_ON(ret); /* logic error */
- btrfs_put_block_group(block_group);
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
+out:
+ btrfs_put_block_group(block_group);
return ret;
}
-struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 bytenr, u32 blocksize,
- int level)
+static struct extent_buffer *
+btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ u64 bytenr, u32 blocksize, int level)
{
struct extent_buffer *buf;
struct extent_buffer *buf;
u64 flags = 0;
int ret;
-
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
block_rsv = use_block_rsv(trans, root, blocksize);
if (IS_ERR(block_rsv))
else
memset(&extent_op->key, 0, sizeof(extent_op->key));
extent_op->flags_to_set = flags;
- extent_op->update_key = 1;
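+ /*
+ * Skinny metadata items have no tree_block_info, so there is no
+ * key stored in the extent item to update.
+ */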
+ if (skinny_metadata)
+ extent_op->update_key = 0;
+ else
+ extent_op->update_key = 1;
extent_op->update_flags = 1;
extent_op->is_data = 0;
continue;
/* We don't lock the tree block, it's OK to be racy here */
- ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
- &refs, &flags);
+ ret = btrfs_lookup_extent_info(trans, root, bytenr,
+ wc->level - 1, 1, &refs,
+ &flags);
/* We don't care about errors in readahead. */
if (ret < 0)
continue;
(wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
BUG_ON(!path->locks[level]);
ret = btrfs_lookup_extent_info(trans, root,
- eb->start, eb->len,
+ eb->start, level, 1,
&wc->refs[level],
&wc->flags[level]);
BUG_ON(ret == -ENOMEM);
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
- ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+ ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
&wc->refs[level - 1],
&wc->flags[level - 1]);
if (ret < 0) {
return ret;
}
- BUG_ON(wc->refs[level - 1] == 0);
+ if (unlikely(wc->refs[level - 1] == 0)) {
+ btrfs_err(root->fs_info, "Missing references.");
+ BUG();
+ }
*lookup_info = 0;
if (wc->stage == DROP_REFERENCE) {
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
next = read_tree_block(root, bytenr, blocksize, generation);
- if (!next)
+ if (!next || !extent_buffer_uptodate(next)) {
+ free_extent_buffer(next);
return -EIO;
+ }
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
}
path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
ret = btrfs_lookup_extent_info(trans, root,
- eb->start, eb->len,
+ eb->start, level, 1,
&wc->refs[level],
&wc->flags[level]);
if (ret < 0) {
* reference count by one. if update_ref is true, this function
* also make sure backrefs for the shared block and all lower level
* blocks are properly updated.
+ *
+ * If called with for_reloc == 0, may exit early with -EAGAIN
*/
int btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int update_ref,
ret = btrfs_lookup_extent_info(trans, root,
path->nodes[level]->start,
- path->nodes[level]->len,
- &wc->refs[level],
+ level, 1, &wc->refs[level],
&wc->flags[level]);
if (ret < 0) {
err = ret;
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
while (1) {
+ if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
+ pr_debug("btrfs: drop snapshot early exit\n");
+ err = -EAGAIN;
+ goto out_end_trans;
+ }
+
ret = walk_down_tree(trans, root, path, wc);
if (ret < 0) {
err = ret;
free_excluded_extents(root, cache);
}
+ ret = btrfs_add_block_group_cache(root->fs_info, cache);
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ btrfs_put_block_group(cache);
+ goto error;
+ }
+
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
&space_info);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ spin_lock(&info->block_group_cache_lock);
+ rb_erase(&cache->cache_node,
+ &info->block_group_cache_tree);
+ spin_unlock(&info->block_group_cache_lock);
+ btrfs_put_block_group(cache);
+ goto error;
+ }
+
cache->space_info = space_info;
spin_lock(&cache->space_info->lock);
cache->space_info->bytes_readonly += cache->bytes_super;
__link_block_group(space_info, cache);
- ret = btrfs_add_block_group_cache(root->fs_info, cache);
- BUG_ON(ret); /* Logic error */
-
set_avail_alloc_bits(root->fs_info, cache->flags);
if (btrfs_chunk_readonly(root, cache->key.objectid))
set_block_group_ro(cache, 1);
free_excluded_extents(root, cache);
+ ret = btrfs_add_block_group_cache(root->fs_info, cache);
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ btrfs_put_block_group(cache);
+ return ret;
+ }
+
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
&cache->space_info);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_remove_free_space_cache(cache);
+ spin_lock(&root->fs_info->block_group_cache_lock);
+ rb_erase(&cache->cache_node,
+ &root->fs_info->block_group_cache_tree);
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ btrfs_put_block_group(cache);
+ return ret;
+ }
update_global_block_rsv(root->fs_info);
spin_lock(&cache->space_info->lock);
__link_block_group(cache->space_info, cache);
- ret = btrfs_add_block_group_cache(root->fs_info, cache);
- BUG_ON(ret); /* Logic error */
-
list_add_tail(&cache->new_bg_list, &trans->new_bgs);
set_avail_alloc_bits(extent_root->fs_info, type);
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
+#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);
-#define LEAK_DEBUG 0
-#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
+
+static inline
+void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&leak_lock, flags);
+ list_add(new, head);
+ spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_del(struct list_head *entry)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&leak_lock, flags);
+ list_del(entry);
+ spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_check(void)
+{
+ struct extent_state *state;
+ struct extent_buffer *eb;
+
+ while (!list_empty(&states)) {
+ state = list_entry(states.next, struct extent_state, leak_list);
+ printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+ "state %lu in tree %p refs %d\n",
+ (unsigned long long)state->start,
+ (unsigned long long)state->end,
+ state->state, state->tree, atomic_read(&state->refs));
+ list_del(&state->leak_list);
+ kmem_cache_free(extent_state_cache, state);
+ }
+
+ while (!list_empty(&buffers)) {
+ eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+ printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+ "refs %d\n", (unsigned long long)eb->start,
+ eb->len, atomic_read(&eb->refs));
+ list_del(&eb->leak_list);
+ kmem_cache_free(extent_buffer_cache, eb);
+ }
+}
+#else
+#define btrfs_leak_debug_add(new, head) do {} while (0)
+#define btrfs_leak_debug_del(entry) do {} while (0)
+#define btrfs_leak_debug_check() do {} while (0)
#endif
#define BUFFER_LRU_MAX 64
void extent_io_exit(void)
{
- struct extent_state *state;
- struct extent_buffer *eb;
-
- while (!list_empty(&states)) {
- state = list_entry(states.next, struct extent_state, leak_list);
- printk(KERN_ERR "btrfs state leak: start %llu end %llu "
- "state %lu in tree %p refs %d\n",
- (unsigned long long)state->start,
- (unsigned long long)state->end,
- state->state, state->tree, atomic_read(&state->refs));
- list_del(&state->leak_list);
- kmem_cache_free(extent_state_cache, state);
-
- }
-
- while (!list_empty(&buffers)) {
- eb = list_entry(buffers.next, struct extent_buffer, leak_list);
- printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
- "refs %d\n", (unsigned long long)eb->start,
- eb->len, atomic_read(&eb->refs));
- list_del(&eb->leak_list);
- kmem_cache_free(extent_buffer_cache, eb);
- }
+ btrfs_leak_debug_check();
/*
* Make sure all delayed rcu free are flushed before we
static struct extent_state *alloc_extent_state(gfp_t mask)
{
struct extent_state *state;
-#if LEAK_DEBUG
- unsigned long flags;
-#endif
state = kmem_cache_alloc(extent_state_cache, mask);
if (!state)
state->state = 0;
state->private = 0;
state->tree = NULL;
-#if LEAK_DEBUG
- spin_lock_irqsave(&leak_lock, flags);
- list_add(&state->leak_list, &states);
- spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+ btrfs_leak_debug_add(&state->leak_list, &states);
atomic_set(&state->refs, 1);
init_waitqueue_head(&state->wq);
trace_alloc_extent_state(state, mask, _RET_IP_);
if (!state)
return;
if (atomic_dec_and_test(&state->refs)) {
-#if LEAK_DEBUG
- unsigned long flags;
-#endif
WARN_ON(state->tree);
-#if LEAK_DEBUG
- spin_lock_irqsave(&leak_lock, flags);
- list_del(&state->leak_list);
- spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+ btrfs_leak_debug_del(&state->leak_list);
trace_free_extent_state(state, _RET_IP_);
kmem_cache_free(extent_state_cache, state);
}
}
static void set_state_cb(struct extent_io_tree *tree,
- struct extent_state *state, int *bits)
+ struct extent_state *state, unsigned long *bits)
{
if (tree->ops && tree->ops->set_bit_hook)
tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}
static void clear_state_cb(struct extent_io_tree *tree,
- struct extent_state *state, int *bits)
+ struct extent_state *state, unsigned long *bits)
{
if (tree->ops && tree->ops->clear_bit_hook)
tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}
static void set_state_bits(struct extent_io_tree *tree,
- struct extent_state *state, int *bits);
+ struct extent_state *state, unsigned long *bits);
/*
* insert an extent_state struct into the tree. 'bits' are set on the
*/
static int insert_state(struct extent_io_tree *tree,
struct extent_state *state, u64 start, u64 end,
- int *bits)
+ unsigned long *bits)
{
struct rb_node *node;
*/
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
struct extent_state *state,
- int *bits, int wake)
+ unsigned long *bits, int wake)
{
struct extent_state *next;
- int bits_to_clear = *bits & ~EXTENT_CTLBITS;
+ unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1;
return prealloc;
}
-void extent_io_tree_panic(struct extent_io_tree *tree, int err)
+static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
btrfs_panic(tree_fs_info(tree), err, "Locking error: "
"Extent tree was modified by another "
* This takes the tree lock, and returns 0 on success and < 0 on error.
*/
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int wake, int delete,
+ unsigned long bits, int wake, int delete,
struct extent_state **cached_state,
gfp_t mask)
{
* The range [start, end] is inclusive.
* The tree lock is taken by this function
*/
-void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
+static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned long bits)
{
struct extent_state *state;
struct rb_node *node;
static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state,
- int *bits)
+ unsigned long *bits)
{
- int bits_to_set = *bits & ~EXTENT_CTLBITS;
+ unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
set_state_cb(tree, state, bits);
if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int exclusive_bits, u64 *failed_start,
- struct extent_state **cached_state, gfp_t mask)
+ unsigned long bits, unsigned long exclusive_bits,
+ u64 *failed_start, struct extent_state **cached_state,
+ gfp_t mask)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
goto again;
}
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
- u64 *failed_start, struct extent_state **cached_state,
- gfp_t mask)
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned long bits, u64 *failed_start,
+ struct extent_state **cached_state, gfp_t mask)
{
return __set_extent_bit(tree, start, end, bits, 0, failed_start,
cached_state, mask);
* boundary bits like LOCK.
*/
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int clear_bits,
+ unsigned long bits, unsigned long clear_bits,
struct extent_state **cached_state, gfp_t mask)
{
struct extent_state *state;
}
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, gfp_t mask)
+ unsigned long bits, gfp_t mask)
{
return set_extent_bit(tree, start, end, bits, NULL,
NULL, mask);
}
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, gfp_t mask)
+ unsigned long bits, gfp_t mask)
{
return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
cached_state, mask);
}
* us if waiting is desired.
*/
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, struct extent_state **cached_state)
+ unsigned long bits, struct extent_state **cached_state)
{
int err;
u64 failed_start;
* return it. tree->lock must be held. NULL will returned if
* nothing was found after 'start'
*/
-struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
- u64 start, int bits)
+static struct extent_state *
+find_first_extent_bit_state(struct extent_io_tree *tree,
+ u64 start, unsigned long bits)
{
struct rb_node *node;
struct extent_state *state;
* If nothing was found, 1 is returned. If found something, return 0.
*/
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits,
+ u64 *start_ret, u64 *end_ret, unsigned long bits,
struct extent_state **cached_state)
{
struct extent_state *state;
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
- int clear_bits = 0;
+ unsigned long clear_bits = 0;
if (op & EXTENT_CLEAR_UNLOCK)
clear_bits |= EXTENT_LOCKED;
return ret;
}
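+/*
+ * Store an array of checksums into the private field of consecutive extent
+ * states, starting with the state that begins at @start.
+ */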
+void extent_cache_csums_dio(struct extent_io_tree *tree, u64 start, u32 csums[],
+ int count)
+{
+ struct rb_node *node;
+ struct extent_state *state;
+
+ spin_lock(&tree->lock);
+ /*
+ * this search will find all the extents that end after
+ * our range starts.
+ */
+ node = tree_search(tree, start);
+ BUG_ON(!node);
+
+ state = rb_entry(node, struct extent_state, rb_node);
+ BUG_ON(state->start != start);
+
+ while (count) {
+ state->private = *csums++;
+ count--;
+ state = next_state(state);
+ }
+ spin_unlock(&tree->lock);
+}
+
+static inline u64 __btrfs_get_bio_offset(struct bio *bio, int bio_index)
+{
+ struct bio_vec *bvec = bio->bi_io_vec + bio_index;
+
+ return page_offset(bvec->bv_page) + bvec->bv_offset;
+}
+
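+/*
+ * Like extent_cache_csums_dio(), but the offset of each checksum is taken
+ * from the bio's page vector rather than a single contiguous range.
+ */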
+void extent_cache_csums(struct extent_io_tree *tree, struct bio *bio, int bio_index,
+ u32 csums[], int count)
+{
+ struct rb_node *node;
+ struct extent_state *state = NULL;
+ u64 start;
+
+ spin_lock(&tree->lock);
+ do {
+ start = __btrfs_get_bio_offset(bio, bio_index);
+ if (state == NULL || state->start != start) {
+ node = tree_search(tree, start);
+ BUG_ON(!node);
+
+ state = rb_entry(node, struct extent_state, rb_node);
+ BUG_ON(state->start != start);
+ }
+ state->private = *csums++;
+ count--;
+ bio_index++;
+
+ state = next_state(state);
+ } while (count);
+ spin_unlock(&tree->lock);
+}
+
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
struct rb_node *node;
* range is found set.
*/
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int filled, struct extent_state *cached)
+ unsigned long bits, int filled, struct extent_state *cached)
{
struct extent_state *state = NULL;
struct rb_node *node;
return ret;
}
-void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
+static void attach_extent_buffer_page(struct extent_buffer *eb,
+ struct page *page)
{
if (!PagePrivate(page)) {
SetPagePrivate(page);
struct page *page,
get_extent_t *get_extent,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags)
+ unsigned long *bio_flags, int rw)
{
struct inode *inode = page->mapping->host;
u64 start = page_offset(page);
}
pnr -= page->index;
- ret = submit_extent_page(READ, tree, page,
+ ret = submit_extent_page(rw, tree, page,
sector, disk_io_size, pg_offset,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
int ret;
ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
- &bio_flags);
+ &bio_flags, READ);
if (bio)
ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
return ret;
return 0;
}
-static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
{
wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
TASK_UNINTERRUPTIBLE);
u64 offset = eb->start;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
- int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
+ int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
int ret = 0;
clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
continue;
for (i = 0; i < nr; i++) {
__extent_read_full_page(tree, pagepool[i], get_extent,
- &bio, 0, &bio_flags);
+ &bio, 0, &bio_flags, READ);
page_cache_release(pagepool[i]);
}
nr = 0;
}
for (i = 0; i < nr; i++) {
__extent_read_full_page(tree, pagepool[i], get_extent,
- &bio, 0, &bio_flags);
+ &bio, 0, &bio_flags, READ);
page_cache_release(pagepool[i]);
}
* are locked or under IO and drops the related state bits if it is safe
* to drop the page.
*/
-int try_release_extent_state(struct extent_map_tree *map,
- struct extent_io_tree *tree, struct page *page,
- gfp_t mask)
+static int try_release_extent_state(struct extent_map_tree *map,
+ struct extent_io_tree *tree,
+ struct page *page, gfp_t mask)
{
u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
static void __free_extent_buffer(struct extent_buffer *eb)
{
-#if LEAK_DEBUG
- unsigned long flags;
- spin_lock_irqsave(&leak_lock, flags);
- list_del(&eb->leak_list);
- spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+ btrfs_leak_debug_del(&eb->leak_list);
kmem_cache_free(extent_buffer_cache, eb);
}
gfp_t mask)
{
struct extent_buffer *eb = NULL;
-#if LEAK_DEBUG
- unsigned long flags;
-#endif
eb = kmem_cache_zalloc(extent_buffer_cache, mask);
if (eb == NULL)
init_waitqueue_head(&eb->write_lock_wq);
init_waitqueue_head(&eb->read_lock_wq);
-#if LEAK_DEBUG
- spin_lock_irqsave(&leak_lock, flags);
- list_add(&eb->leak_list, &buffers);
- spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+ btrfs_leak_debug_add(&eb->leak_list, &buffers);
+
spin_lock_init(&eb->refs_lock);
atomic_set(&eb->refs, 1);
atomic_set(&eb->io_pages, 0);
}
/* Expects to have eb->eb_lock already held */
-static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+static int release_extent_buffer(struct extent_buffer *eb)
{
WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) {
* I know this is terrible, but it's temporary until we stop tracking
* the uptodate bits and such for the extent buffers.
*/
- release_extent_buffer(eb, GFP_ATOMIC);
+ release_extent_buffer(eb);
}
void free_extent_buffer_stale(struct extent_buffer *eb)
if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
atomic_dec(&eb->refs);
- release_extent_buffer(eb, GFP_NOFS);
+ release_extent_buffer(eb);
}
void clear_extent_buffer_dirty(struct extent_buffer *eb)
return was_dirty;
}
-static int range_straddles_pages(u64 start, u64 len)
-{
- if (len < PAGE_CACHE_SIZE)
- return 1;
- if (start & (PAGE_CACHE_SIZE - 1))
- return 1;
- if ((start + len) & (PAGE_CACHE_SIZE - 1))
- return 1;
- return 0;
-}
-
int clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
unsigned long i;
return 0;
}
-int extent_range_uptodate(struct extent_io_tree *tree,
- u64 start, u64 end)
-{
- struct page *page;
- int ret;
- int pg_uptodate = 1;
- int uptodate;
- unsigned long index;
-
- if (range_straddles_pages(start, end - start + 1)) {
- ret = test_range_bit(tree, start, end,
- EXTENT_UPTODATE, 1, NULL);
- if (ret)
- return 1;
- }
- while (start <= end) {
- index = start >> PAGE_CACHE_SHIFT;
- page = find_get_page(tree->mapping, index);
- if (!page)
- return 1;
- uptodate = PageUptodate(page);
- page_cache_release(page);
- if (!uptodate) {
- pg_uptodate = 0;
- break;
- }
- start += PAGE_CACHE_SIZE;
- }
- return pg_uptodate;
-}
-
int extent_buffer_uptodate(struct extent_buffer *eb)
{
return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
ClearPageError(page);
err = __extent_read_full_page(tree, page,
get_extent, &bio,
- mirror_num, &bio_flags);
+ mirror_num, &bio_flags,
+ READ | REQ_META);
if (err)
ret = err;
} else {
}
if (bio) {
- err = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ err = submit_one_bio(READ | REQ_META, bio, mirror_num,
+ bio_flags);
if (err)
return err;
}
}
}
-int try_release_extent_buffer(struct page *page, gfp_t mask)
+int try_release_extent_buffer(struct page *page)
{
struct extent_buffer *eb;
}
spin_unlock(&page->mapping->private_lock);
- if ((mask & GFP_NOFS) == GFP_NOFS)
- mask = GFP_NOFS;
-
/*
* If tree ref isn't set then we know the ref on this eb is a real ref,
* so just return, this page will likely be freed soon anyway.
return 0;
}
- return release_extent_buffer(eb, mask);
+ return release_extent_buffer(eb);
}
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate);
void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
- int *bits);
+ unsigned long *bits);
void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
- int *bits);
+ unsigned long *bits);
void (*merge_extent_hook)(struct inode *inode,
struct extent_state *new,
struct extent_state *other);
/* for use by the FS */
u64 private;
+#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
+#endif
};
#define INLINE_EXTENT_BUFFER_PAGES 16
atomic_t refs;
atomic_t io_pages;
int read_mirror;
- struct list_head leak_list;
struct rcu_head rcu_head;
pid_t lock_owner;
wait_queue_head_t read_lock_wq;
wait_queue_head_t lock_wq;
struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
+#ifdef CONFIG_BTRFS_DEBUG
+ struct list_head leak_list;
+#endif
};
static inline void extent_set_compress_type(unsigned long *bio_flags,
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
-int try_release_extent_buffer(struct page *page, gfp_t mask);
-int try_release_extent_state(struct extent_map_tree *map,
- struct extent_io_tree *tree, struct page *page,
- gfp_t mask);
+int try_release_extent_buffer(struct page *page);
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, struct extent_state **cached);
+ unsigned long bits, struct extent_state **cached);
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached, gfp_t mask);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int filled, struct extent_state *cached_state);
+ unsigned long bits, int filled,
+ struct extent_state *cached_state);
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, gfp_t mask);
+ unsigned long bits, gfp_t mask);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int wake, int delete, struct extent_state **cached,
- gfp_t mask);
+ unsigned long bits, int wake, int delete,
+ struct extent_state **cached, gfp_t mask);
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, gfp_t mask);
+ unsigned long bits, gfp_t mask);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, u64 *failed_start,
+ unsigned long bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int clear_bits,
+ unsigned long bits, unsigned long clear_bits,
struct extent_state **cached_state, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits,
+ u64 *start_ret, u64 *end_ret, unsigned long bits,
struct extent_state **cached_state);
-struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
- u64 start, int bits);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent);
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
+void extent_cache_csums_dio(struct extent_io_tree *tree, u64 start, u32 csums[],
+ int count);
+void extent_cache_csums(struct extent_io_tree *tree, struct bio *bio,
+ int bvec_index, u32 csums[], int count);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
get_extent_t *get_extent, int mirror_num);
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
static inline unsigned long num_extent_pages(u64 start, u64 len)
{
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
-void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_buffer *eb);
int set_extent_buffer_uptodate(struct extent_buffer *eb);
unsigned long min_len, char **map,
unsigned long *map_start,
unsigned long *map_len);
-int extent_range_uptodate(struct extent_io_tree *tree,
- u64 start, u64 end);
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode,
test_bit(EXTENT_FLAG_LOGGING, &next->flags))
return 0;
+ /*
+ * We don't want to merge stuff that hasn't been written to the log yet
+ * since it may not reflect exactly what is on disk, and that would be
+ * bad.
+ */
+ if (!list_empty(&prev->list) || !list_empty(&next->list))
+ return 0;
+
if (extent_map_end(prev) == next->start &&
prev->flags == next->flags &&
prev->bdev == next->bdev &&
em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
- list_move(&em->list, &tree->modified_extents);
- list_del_init(&merge->list);
rb_erase(&merge->rb_node, &tree->map);
free_extent_map(merge);
}
merge->in_tree = 0;
em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
- list_del_init(&merge->list);
free_extent_map(merge);
}
}
* reference dropped if the merge attempt was successful.
*/
int add_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *em)
+ struct extent_map *em, int modified)
{
int ret = 0;
struct rb_node *rb;
em->mod_start = em->start;
em->mod_len = em->len;
- try_merge_map(tree, em);
+ if (modified)
+ list_move(&em->list, &tree->modified_extents);
+ else
+ try_merge_map(tree, em);
out:
return ret;
}
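
add_extent_mapping() now takes a third argument; when 'modified' is non-zero the
extent map is kept on the tree's modified_extents list for the tree log instead
of being merged via try_merge_map(). A minimal sketch of the new calling
convention (variable names are illustrative; the locking mirrors the call sites
updated later in this series):

	write_lock(&em_tree->lock);
	/* pass 1 to keep em tracked for logging, 0 to merge with neighbours as before */
	ret = add_extent_mapping(em_tree, em, 1);
	write_unlock(&em_tree->lock);
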
return start + len;
}
-struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 len, int strict)
+static struct extent_map *
+__lookup_extent_mapping(struct extent_map_tree *tree,
+ u64 start, u64 len, int strict)
{
struct extent_map *em;
struct rb_node *rb_node;
u64 mod_len;
u64 orig_start;
u64 orig_block_len;
+ u64 ram_bytes;
u64 block_start;
u64 block_len;
u64 generation;
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *em);
+ struct extent_map *em, int modified);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
struct extent_map *alloc_extent_map(void);
return ret;
}
-struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- u64 bytenr, int cow)
+static struct btrfs_csum_item *
+btrfs_lookup_csum(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ u64 bytenr, int cow)
{
int ret;
struct btrfs_key file_key;
return ret;
}
-u64 btrfs_file_extent_length(struct btrfs_path *path)
-{
- int extent_type;
- struct btrfs_file_extent_item *fi;
- u64 len;
-
- fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(path->nodes[0], fi);
-
- if (extent_type == BTRFS_FILE_EXTENT_REG ||
- extent_type == BTRFS_FILE_EXTENT_PREALLOC)
- len = btrfs_file_extent_num_bytes(path->nodes[0], fi);
- else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
- len = btrfs_file_extent_inline_len(path->nodes[0], fi);
- else
- BUG();
-
- return len;
-}
-
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio)
{
- u32 sum;
+ u32 sum[16];
+ int len;
struct bio_vec *bvec = bio->bi_io_vec;
int bio_index = 0;
u64 offset = 0;
u64 disk_bytenr;
u32 diff;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
- int ret;
+ int count;
struct btrfs_path *path;
struct btrfs_csum_item *item = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
if (dio)
offset = logical_offset;
while (bio_index < bio->bi_vcnt) {
+ len = min_t(int, ARRAY_SIZE(sum), bio->bi_vcnt - bio_index);
if (!dio)
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
- ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
- if (ret == 0)
+ count = btrfs_find_ordered_sum(inode, offset, disk_bytenr, sum,
+ len);
+ if (count)
goto found;
if (!item || disk_bytenr < item_start_offset ||
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
- ret = PTR_ERR(item);
- if (ret == -ENOENT || ret == -EFBIG)
- ret = 0;
- sum = 0;
+ count = 1;
+ sum[0] = 0;
if (BTRFS_I(inode)->root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
set_extent_bits(io_tree, offset,
diff = disk_bytenr - item_start_offset;
diff = diff / root->sectorsize;
diff = diff * csum_size;
-
- read_extent_buffer(path->nodes[0], &sum,
+ count = min_t(int, len, (item_last_offset - disk_bytenr) >>
+ inode->i_sb->s_blocksize_bits);
+ read_extent_buffer(path->nodes[0], sum,
((unsigned long)item) + diff,
- csum_size);
+ csum_size * count);
found:
- if (dst)
- *dst++ = sum;
- else
- set_state_private(io_tree, offset, sum);
- disk_bytenr += bvec->bv_len;
- offset += bvec->bv_len;
- bio_index++;
- bvec++;
+ if (dst) {
+ memcpy(dst, sum, count * csum_size);
+ dst += count;
+ } else {
+ if (dio)
+ extent_cache_csums_dio(io_tree, offset, sum,
+ count);
+ else
+ extent_cache_csums(io_tree, bio, bio_index, sum,
+ count);
+ }
+ while (count--) {
+ disk_bytenr += bvec->bv_len;
+ offset += bvec->bv_len;
+ bio_index++;
+ bvec++;
+ }
}
btrfs_free_path(path);
return 0;
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
- key.type != BTRFS_EXTENT_CSUM_KEY)
- break;
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.offset > end)
+ key.type != BTRFS_EXTENT_CSUM_KEY ||
+ key.offset > end)
break;
if (key.offset > start)
data = kmap_atomic(bvec->bv_page);
sector_sum->sum = ~(u32)0;
- sector_sum->sum = btrfs_csum_data(root,
- data + bvec->bv_offset,
+ sector_sum->sum = btrfs_csum_data(data + bvec->bv_offset,
sector_sum->sum,
bvec->bv_len);
kunmap_atomic(data);
* This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required.
*/
-static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static noinline void truncate_one_csum(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(trans, root, path, new_size, 1);
+ btrfs_truncate_item(root, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(trans, root, path, new_size, 0);
+ btrfs_truncate_item(root, path, new_size, 0);
key->offset = end_byte;
- btrfs_set_item_key_safe(trans, root, path, key);
+ btrfs_set_item_key_safe(root, path, key);
} else {
BUG();
}
key.offset = end_byte - 1;
} else {
- truncate_one_csum(trans, root, path, &key, bytenr, len);
+ truncate_one_csum(root, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
diff /= csum_size;
diff *= csum_size;
- btrfs_extend_item(trans, root, path, diff);
+ btrfs_extend_item(root, path, diff);
goto csum;
}
* the same inode in the tree, we will merge them together (by
* __btrfs_add_inode_defrag()) and free the one that we want to requeue.
*/
-void btrfs_requeue_inode_defrag(struct inode *inode,
- struct inode_defrag *defrag)
+static void btrfs_requeue_inode_defrag(struct inode *inode,
+ struct inode_defrag *defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
/*
* unlocks pages after btrfs_file_write is done with them
*/
-void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
size_t i;
for (i = 0; i < num_pages; i++) {
* doing real data extents, marking pages dirty and delalloc as required.
*/
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
- struct extent_state **cached)
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
int err = 0;
int i;
int testend = 1;
unsigned long flags;
int compressed = 0;
+ bool modified;
WARN_ON(end < start);
if (end == (u64)-1) {
while (1) {
int no_splits = 0;
+ modified = false;
if (!split)
split = alloc_extent_map();
if (!split2)
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
clear_bit(EXTENT_FLAG_LOGGING, &flags);
+ modified = !list_empty(&em->list);
remove_extent_mapping(em_tree, em);
if (no_splits)
goto next;
split->block_len = em->block_len;
else
split->block_len = split->len;
+ split->ram_bytes = em->ram_bytes;
split->orig_block_len = max(split->block_len,
em->orig_block_len);
split->generation = gen;
split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
- ret = add_extent_mapping(em_tree, split);
+ ret = add_extent_mapping(em_tree, split, modified);
BUG_ON(ret); /* Logic error */
- list_move(&split->list, &em_tree->modified_extents);
free_extent_map(split);
split = split2;
split2 = NULL;
split->generation = gen;
split->orig_block_len = max(em->block_len,
em->orig_block_len);
+ split->ram_bytes = em->ram_bytes;
if (compressed) {
split->block_len = em->block_len;
split->orig_start = em->orig_start;
}
- ret = add_extent_mapping(em_tree, split);
+ ret = add_extent_mapping(em_tree, split, modified);
BUG_ON(ret); /* Logic error */
- list_move(&split->list, &em_tree->modified_extents);
free_extent_map(split);
split = NULL;
}
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = end;
- btrfs_set_item_key_safe(trans, root, path, &new_key);
+ btrfs_set_item_key_safe(root, path, &new_key);
extent_offset += end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
- btrfs_set_item_key_safe(trans, root, path, &new_key);
+ btrfs_set_item_key_safe(root, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
trans->transid);
path->slots[0]++;
new_key.offset = start;
- btrfs_set_item_key_safe(trans, root, path, &new_key);
+ btrfs_set_item_key_safe(root, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
path->slots[0]++;
key.offset = offset;
- btrfs_set_item_key_safe(trans, root, path, &key);
+ btrfs_set_item_key_safe(root, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
} else {
hole_em->start = offset;
hole_em->len = end - offset;
+ hole_em->ram_bytes = hole_em->len;
hole_em->orig_start = offset;
hole_em->block_start = EXTENT_MAP_HOLE;
do {
btrfs_drop_extent_cache(inode, offset, end - 1, 0);
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, hole_em);
- if (!ret)
- list_move(&hole_em->list,
- &em_tree->modified_extents);
+ ret = add_extent_mapping(em_tree, hole_em, 1);
write_unlock(&em_tree->lock);
} while (ret == -EEXIST);
free_extent_map(hole_em);
spin_lock(&block_group->lock);
if (!((BTRFS_I(inode)->flags & flags) == flags)) {
- printk(KERN_INFO "Old style space inode found, converting.\n");
+ btrfs_info(root->fs_info,
+ "Old style space inode found, converting.");
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
BTRFS_INODE_NODATACOW;
block_group->disk_cache_state = BTRFS_DC_CLEAR;
return inode;
}
-int __create_free_space_inode(struct btrfs_root *root,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u64 ino, u64 offset)
+static int __create_free_space_inode(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ u64 ino, u64 offset)
{
struct btrfs_key key;
struct btrfs_disk_key disk_key;
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
- crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+ crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_CACHE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
io_ctl_unmap_page(io_ctl);
kunmap(io_ctl->pages[0]);
io_ctl_map_page(io_ctl, 0);
- crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+ crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_CACHE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
if (val != crc) {
spin_unlock(&ctl->tree_lock);
}
-int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
- struct btrfs_free_space_ctl *ctl,
- struct btrfs_path *path, u64 offset)
+static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_path *path, u64 offset)
{
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
btrfs_release_path(path);
if (BTRFS_I(inode)->generation != generation) {
- printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
- " not match free space cache generation (%llu)\n",
- (unsigned long long)BTRFS_I(inode)->generation,
- (unsigned long long)generation);
+ btrfs_err(root->fs_info,
+ "free space inode generation (%llu) "
+ "did not match free space cache generation (%llu)",
+ (unsigned long long)BTRFS_I(inode)->generation,
+ (unsigned long long)generation);
return 0;
}
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock);
if (ret) {
- printk(KERN_ERR "Duplicate entries in "
- "free space cache, dumping\n");
+ btrfs_err(root->fs_info,
+ "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
- printk(KERN_ERR "Duplicate entries in "
- "free space cache, dumping\n");
+ btrfs_err(root->fs_info,
+ "Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
if (!matched) {
__btrfs_remove_free_space_cache(ctl);
- printk(KERN_ERR "block group %llu has an wrong amount of free "
- "space\n", block_group->key.objectid);
+ btrfs_err(fs_info, "block group %llu has wrong amount of free space",
+ block_group->key.objectid);
ret = -1;
}
out:
spin_unlock(&block_group->lock);
ret = 0;
- printk(KERN_ERR "btrfs: failed to load free space cache "
- "for block group %llu\n", block_group->key.objectid);
+ btrfs_err(fs_info, "failed to load free space cache for block group %llu",
+ block_group->key.objectid);
}
iput(inode);
* on mount. This will return 0 if it was successful in writing the cache out,
* and -1 if it was not.
*/
-int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
- struct btrfs_free_space_ctl *ctl,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_trans_handle *trans,
- struct btrfs_path *path, u64 offset)
+static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ struct btrfs_free_space_ctl *ctl,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u64 offset)
{
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
spin_unlock(&block_group->lock);
ret = 0;
#ifdef DEBUG
- printk(KERN_ERR "btrfs: failed to write free space cache "
- "for block group %llu\n", block_group->key.objectid);
+ btrfs_err(root->fs_info,
+ "failed to write free space cache for block group %llu",
+ block_group->key.objectid);
#endif
}
search_bytes = ctl->unit;
search_bytes = min(search_bytes, end - search_start + 1);
ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
- BUG_ON(ret < 0 || search_start != *offset);
+ if (ret < 0 || search_start != *offset)
+ return -EINVAL;
/* We may have found more bits than what we need */
search_bytes = min(search_bytes, *bytes);
re_search = true;
goto again;
}
- BUG_ON(ret); /* logic error */
out_lock:
spin_unlock(&ctl->tree_lock);
out:
return 0;
}
-void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
+static void __btrfs_remove_free_space_cache_locked(
+ struct btrfs_free_space_ctl *ctl)
{
struct btrfs_free_space *info;
struct rb_node *node;
ret = __load_free_space_cache(root, inode, ctl, path, 0);
if (ret < 0)
- printk(KERN_ERR "btrfs: failed to load free ino cache for "
- "root %llu\n", root->root_key.objectid);
+ btrfs_err(fs_info,
+ "failed to load free ino cache for root %llu",
+ root->root_key.objectid);
out_put:
iput(inode);
out:
if (ret) {
btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
- printk(KERN_ERR "btrfs: failed to write free ino cache "
- "for root %llu\n", root->root_key.objectid);
+ btrfs_err(root->fs_info,
+ "failed to write free ino cache for root %llu",
+ root->root_key.objectid);
#endif
}
iput(inode);
return ret;
}
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+static struct btrfs_block_group_cache *init_test_block_group(void)
+{
+ struct btrfs_block_group_cache *cache;
+
+ cache = kzalloc(sizeof(*cache), GFP_NOFS);
+ if (!cache)
+ return NULL;
+ cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+ GFP_NOFS);
+ if (!cache->free_space_ctl) {
+ kfree(cache);
+ return NULL;
+ }
+
+ cache->key.objectid = 0;
+ cache->key.offset = 1024 * 1024 * 1024;
+ cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ cache->sectorsize = 4096;
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->list);
+ INIT_LIST_HEAD(&cache->cluster_list);
+ INIT_LIST_HEAD(&cache->new_bg_list);
+
+ btrfs_init_free_space_ctl(cache);
+
+ return cache;
+}
+
+/*
+ * Checks to see if the given range is in the free space cache. This is really
+ * just used to check the absence of space, so if there is free space in the
+ * range at all we will return 1.
+ */
+static int check_exists(struct btrfs_block_group_cache *cache, u64 offset,
+ u64 bytes)
+{
+ struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+ struct btrfs_free_space *info;
+ int ret = 0;
+
+ spin_lock(&ctl->tree_lock);
+ info = tree_search_offset(ctl, offset, 0, 0);
+ if (!info) {
+ info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
+ 1, 0);
+ if (!info)
+ goto out;
+ }
+
+have_info:
+ if (info->bitmap) {
+ u64 bit_off, bit_bytes;
+ struct rb_node *n;
+ struct btrfs_free_space *tmp;
+
+ bit_off = offset;
+ bit_bytes = ctl->unit;
+ ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
+ if (!ret) {
+ if (bit_off == offset) {
+ ret = 1;
+ goto out;
+ } else if (bit_off > offset &&
+ offset + bytes > bit_off) {
+ ret = 1;
+ goto out;
+ }
+ }
+
+ n = rb_prev(&info->offset_index);
+ while (n) {
+ tmp = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ if (tmp->offset + tmp->bytes < offset)
+ break;
+ if (offset + bytes < tmp->offset) {
+ n = rb_prev(&info->offset_index);
+ continue;
+ }
+ info = tmp;
+ goto have_info;
+ }
+
+ n = rb_next(&info->offset_index);
+ while (n) {
+ tmp = rb_entry(n, struct btrfs_free_space,
+ offset_index);
+ if (offset + bytes < tmp->offset)
+ break;
+ if (tmp->offset + tmp->bytes < offset) {
+ n = rb_next(&info->offset_index);
+ continue;
+ }
+ info = tmp;
+ goto have_info;
+ }
+
+ goto out;
+ }
+
+ if (info->offset == offset) {
+ ret = 1;
+ goto out;
+ }
+
+ if (offset > info->offset && offset < info->offset + info->bytes)
+ ret = 1;
+out:
+ spin_unlock(&ctl->tree_lock);
+ return ret;
+}
+
+/*
+ * Use this if you need to make a bitmap or extent entry specifically, it
+ * doesn't do any of the merging that add_free_space does, this acts a lot like
+ * how the free space cache loading stuff works, so you can get really weird
+ * configurations.
+ */
+static int add_free_space_entry(struct btrfs_block_group_cache *cache,
+ u64 offset, u64 bytes, bool bitmap)
+{
+ struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+ struct btrfs_free_space *info = NULL, *bitmap_info;
+ void *map = NULL;
+ u64 bytes_added;
+ int ret;
+
+again:
+ if (!info) {
+ info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
+ if (!info)
+ return -ENOMEM;
+ }
+
+ if (!bitmap) {
+ spin_lock(&ctl->tree_lock);
+ info->offset = offset;
+ info->bytes = bytes;
+ ret = link_free_space(ctl, info);
+ spin_unlock(&ctl->tree_lock);
+ if (ret)
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ return ret;
+ }
+
+ if (!map) {
+ map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+ if (!map) {
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock(&ctl->tree_lock);
+ bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
+ 1, 0);
+ if (!bitmap_info) {
+ info->bitmap = map;
+ map = NULL;
+ add_new_bitmap(ctl, info, offset);
+ bitmap_info = info;
+ }
+
+ bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+ bytes -= bytes_added;
+ offset += bytes_added;
+ spin_unlock(&ctl->tree_lock);
+
+ if (bytes)
+ goto again;
+
+ if (map)
+ kfree(map);
+ return 0;
+}
+
+/*
+ * This test just does basic sanity checking, making sure we can add an extent
+ * entry and remove space from either end and the middle, and make sure we can
+ * remove space that covers adjacent extent entries.
+ */
+static int test_extents(struct btrfs_block_group_cache *cache)
+{
+ int ret = 0;
+
+ printk(KERN_ERR "Running extent only tests\n");
+
+ /* First just make sure we can remove an entire entry */
+ ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error adding initial extents %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error removing extent %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 0, 4 * 1024 * 1024)) {
+ printk(KERN_ERR "Full remove left some lingering space\n");
+ return -1;
+ }
+
+ /* Ok edge and middle cases now */
+ ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error adding half extent %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error removing tail end %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error removing front end %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096);
+ if (ret) {
+ printk(KERN_ERR "Error removing middle peice %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 0, 1 * 1024 * 1024)) {
+ printk(KERN_ERR "Still have space at the front\n");
+ return -1;
+ }
+
+ if (check_exists(cache, 2 * 1024 * 1024, 4096)) {
+ printk(KERN_ERR "Still have space in the middle\n");
+ return -1;
+ }
+
+ if (check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) {
+ printk(KERN_ERR "Still have space at the end\n");
+ return -1;
+ }
+
+ /* Cleanup */
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+ return 0;
+}
+
+static int test_bitmaps(struct btrfs_block_group_cache *cache)
+{
+ u64 next_bitmap_offset;
+ int ret;
+
+ printk(KERN_ERR "Running bitmap only tests\n");
+
+ ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't create a bitmap entry %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error removing bitmap full range %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 0, 4 * 1024 * 1024)) {
+ printk(KERN_ERR "Left some space in bitmap\n");
+ return -1;
+ }
+
+ ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add to our bitmap entry %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Couldn't remove middle chunk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The first bitmap we have starts at offset 0 so the next one is just
+ * at the end of the first bitmap.
+ */
+ next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+
+ /* Test a bit straddling two bitmaps */
+ ret = add_free_space_entry(cache, next_bitmap_offset -
+ (2 * 1024 * 1024), 4 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add space that straddles two bitmaps"
+ " %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, next_bitmap_offset -
+ (1 * 1024 * 1024), 2 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024),
+ 2 * 1024 * 1024)) {
+ printk(KERN_ERR "Left some space when removing overlapping\n");
+ return -1;
+ }
+
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+ return 0;
+}
+
+/* This is the high grade jackassery */
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+{
+ u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+ int ret;
+
+ printk(KERN_ERR "Running bitmap and extent tests\n");
+
+ /*
+ * First let's do something simple, an extent at the same offset as the
+ * bitmap, but the free space completely in the extent and then
+ * completely in the bitmap.
+ */
+ ret = add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't create bitmap entry %d\n", ret);
+ return ret;
+ }
+
+ ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Couldn't remove extent entry %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 0, 1 * 1024 * 1024)) {
+ printk(KERN_ERR "Left remnants after our remove\n");
+ return -1;
+ }
+
+ /* Now to add back the extent entry and remove from the bitmap */
+ ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
+ if (ret) {
+ printk(KERN_ERR "Couldn't re-add extent entry %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Couldn't remove from bitmap %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) {
+ printk(KERN_ERR "Left remnants in the bitmap\n");
+ return -1;
+ }
+
+ /*
+ * Ok so a little more evil, extent entry and bitmap at the same offset,
+ * removing an overlapping chunk.
+ */
+ ret = add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add to a bitmap %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) {
+ printk(KERN_ERR "Left over peices after removing "
+ "overlapping\n");
+ return -1;
+ }
+
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+ /* Now with the extent entry offset into the bitmap */
+ ret = add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add space to the bitmap %d\n", ret);
+ return ret;
+ }
+
+ ret = add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add extent to the cache %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Problem removing overlapping space %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) {
+ printk(KERN_ERR "Left something behind when removing space");
+ return -1;
+ }
+
+ /*
+ * This has blown up in the past, the extent entry starts before the
+ * bitmap entry, but we're trying to remove an offset that falls
+ * completely within the bitmap range and is in both the extent entry
+ * and the bitmap entry, looks like this
+ *
+ * [ extent ]
+ * [ bitmap ]
+ * [ del ]
+ */
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ ret = add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024,
+ 4 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add bitmap %d\n", ret);
+ return ret;
+ }
+
+ ret = add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024,
+ 5 * 1024 * 1024, 0);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024,
+ 5 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Failed to free our space %d\n", ret);
+ return ret;
+ }
+
+ if (check_exists(cache, bitmap_offset + 1 * 1024 * 1024,
+ 5 * 1024 * 1024)) {
+ printk(KERN_ERR "Left stuff over\n");
+ return -1;
+ }
+
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+ /*
+ * This blew up before, we have part of the free space in a bitmap and
+ * then the entirety of the rest of the space in an extent. This used
+ * to return -EAGAIN back from btrfs_remove_extent, make sure this
+ * doesn't happen.
+ */
+ ret = add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add bitmap entry %d\n", ret);
+ return ret;
+ }
+
+ ret = add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0);
+ if (ret) {
+ printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+
+ ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024);
+ if (ret) {
+ printk(KERN_ERR "Error removing bitmap and extent "
+ "overlapping %d\n", ret);
+ return ret;
+ }
+
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ return 0;
+}
+
+void btrfs_test_free_space_cache(void)
+{
+ struct btrfs_block_group_cache *cache;
+
+ printk(KERN_ERR "Running btrfs free space cache tests\n");
+
+ cache = init_test_block_group();
+ if (!cache) {
+ printk(KERN_ERR "Couldn't run the tests\n");
+ return;
+ }
+
+ if (test_extents(cache))
+ goto out;
+ if (test_bitmaps(cache))
+ goto out;
+ if (test_bitmaps_and_extents(cache))
+ goto out;
+out:
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+ kfree(cache->free_space_ctl);
+ kfree(cache);
+ printk(KERN_ERR "Free space cache tests finished\n");
+}
+#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
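
The new self-tests are compiled in only with CONFIG_BTRFS_FS_RUN_SANITY_TESTS;
the entry point is declared under the same guard in the header hunk below. A
minimal, hypothetical call-site sketch (the actual hook-up is not shown in this
hunk) would be:

	#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
		btrfs_test_free_space_cache();	/* runs extent, bitmap and mixed tests */
	#endif
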
struct btrfs_free_cluster *cluster);
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen);
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+void btrfs_test_free_space_cache(void);
+#endif
+
#endif
return -ENOENT;
}
-int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, u64 *index)
+static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid,
+ u64 *index)
{
struct btrfs_path *path;
struct btrfs_key key;
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
- btrfs_truncate_item(trans, root, path, item_size - del_len, 1);
+ btrfs_truncate_item(root, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1);
+ btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
name, name_len, NULL))
goto out;
- btrfs_extend_item(trans, root, path, ins_len);
+ btrfs_extend_item(root, path, ins_len);
ret = 0;
}
if (ret < 0)
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
- btrfs_extend_item(trans, root, path, ins_len);
+ btrfs_extend_item(root, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
- u64 orig_block_len, int type);
+ u64 orig_block_len, u64 ram_bytes,
+ int type);
+
+static int btrfs_dirty_inode(struct inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
+ em->ram_bytes = async_extent->ram_size;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->compress_type = async_extent->compress_type;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
while (1) {
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- if (!ret)
- list_move(&em->list,
- &em_tree->modified_extents);
+ ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
}
em = alloc_extent_map();
- BUG_ON(!em); /* -ENOMEM */
+ if (!em)
+ goto out_reserve;
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
+ em->ram_bytes = ram_size;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- if (!ret)
- list_move(&em->list,
- &em_tree->modified_extents);
+ ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
btrfs_drop_extent_cache(inode, start,
start + ram_size - 1, 0);
}
+ if (ret)
+ goto out_reserve;
cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ goto out_reserve;
if (root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
cur_alloc_size);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
- goto out_unlock;
+ goto out_reserve;
}
}
out:
return ret;
+out_reserve:
+ btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_unlock:
extent_clear_unlock_delalloc(inode,
&BTRFS_I(inode)->io_tree,
u64 disk_bytenr;
u64 num_bytes;
u64 disk_num_bytes;
+ u64 ram_bytes;
int extent_type;
int ret, err;
int type;
struct btrfs_file_extent_item);
extent_type = btrfs_file_extent_type(leaf, fi);
+ ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
if (extent_type == BTRFS_FILE_EXTENT_REG ||
extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
em->block_len = num_bytes;
em->block_start = disk_bytenr;
em->orig_block_len = disk_num_bytes;
+ em->ram_bytes = ram_bytes;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->mod_start = em->start;
em->mod_len = em->len;
em->generation = -1;
while (1) {
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- if (!ret)
- list_move(&em->list,
- &em_tree->modified_extents);
+ ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
* have pending delalloc work to be done.
*/
static void btrfs_set_bit_hook(struct inode *inode,
- struct extent_state *state, int *bits)
+ struct extent_state *state, unsigned long *bits)
{
/*
* extent_io.c clear_bit_hook, see set_bit_hook for why
*/
static void btrfs_clear_bit_hook(struct inode *inode,
- struct extent_state *state, int *bits)
+ struct extent_state *state,
+ unsigned long *bits)
{
/*
* set_bit and clear bit hooks normally require _irqsave/restore
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
u32 csum = ~(u32)0;
+ static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
if (PageChecked(page)) {
ClearPageChecked(page);
if (ret)
goto zeroit;
- csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
+ csum = btrfs_csum_data(kaddr + offset, csum, end - start + 1);
btrfs_csum_final(csum, (char *)&csum);
if (csum != private)
goto zeroit;
return 0;
zeroit:
- printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
- "private %llu\n",
- (unsigned long long)btrfs_ino(page->mapping->host),
- (unsigned long long)start, csum,
- (unsigned long long)private);
+ if (__ratelimit(&_rs))
+ btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u private %llu",
+ (unsigned long long)btrfs_ino(page->mapping->host),
+ (unsigned long long)start, csum,
+ (unsigned long long)private);
memset(kaddr + offset, 1, end - start + 1);
flush_dcache_page(page);
kunmap_atomic(kaddr);
* We have done the truncate/delete so we can go ahead and remove the orphan
* item for this particular inode.
*/
-int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
+static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
+ struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int delete_item = 0;
*/
if (found_key.offset == last_objectid) {
- printk(KERN_ERR "btrfs: Error removing orphan entry, "
- "stopping orphan cleanup\n");
+ btrfs_err(root->fs_info,
+ "Error removing orphan entry, stopping orphan cleanup");
ret = -EINVAL;
goto out;
}
ret = PTR_ERR(trans);
goto out;
}
- printk(KERN_ERR "auto deleting %Lu\n",
- found_key.objectid);
+ btrfs_debug(root->fs_info, "auto deleting %Lu",
+ found_key.objectid);
ret = btrfs_del_orphan_item(trans, root,
found_key.objectid);
BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
}
if (nr_unlink)
- printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
+ btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
if (nr_truncate)
- printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
+ btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
out:
if (ret)
- printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
+ btrfs_crit(root->fs_info,
+ "could not do orphan cleanup %d", ret);
btrfs_free_path(path);
return ret;
}
ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
dir_ino, &index);
if (ret) {
- printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
- "inode %llu parent %llu\n", name_len, name,
- (unsigned long long)ino, (unsigned long long)dir_ino);
+ btrfs_info(root->fs_info,
+ "failed to delete reference to %.*s, inode %llu parent %llu",
+ name_len, name,
+ (unsigned long long)ino, (unsigned long long)dir_ino);
btrfs_abort_transaction(trans, root, ret);
goto err;
}
dir, index);
if (ret == -ENOENT)
ret = 0;
+ else if (ret)
+ btrfs_abort_transaction(trans, root, ret);
err:
btrfs_free_path(path);
if (ret)
eb = path->nodes[level];
if (!btrfs_block_can_be_shared(root, eb))
continue;
- ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
+ ret = btrfs_lookup_extent_info(NULL, root, eb->start, level, 1,
&refs, NULL);
if (refs > 1)
return 1;
}
size =
btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(trans, root, path,
- size, 1);
+ btrfs_truncate_item(root, path, size, 1);
} else if (root->ref_cows) {
inode_sub_bytes(inode, item_end + 1 -
found_key.offset);
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
+ hole_em->ram_bytes = hole_size;
hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
while (1) {
write_lock(&em_tree->lock);
- err = add_extent_mapping(em_tree, hole_em);
- if (!err)
- list_move(&hole_em->list,
- &em_tree->modified_extents);
+ err = add_extent_mapping(em_tree, hole_em, 1);
write_unlock(&em_tree->lock);
if (err != -EEXIST)
break;
ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
if (ret) {
- printk(KERN_WARNING "Could not get space for a "
- "delete, will truncate on mount %d\n", ret);
+ btrfs_warn(root->fs_info,
+ "Could not get space for a delete, will truncate on mount %d",
+ ret);
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
goto no_delete;
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-int btrfs_dirty_inode(struct inode *inode)
+static int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
em->block_start += start_diff;
em->block_len -= start_diff;
}
- return add_extent_mapping(em_tree, em);
+ return add_extent_mapping(em_tree, em, 0);
}
static noinline int uncompress_inline(struct btrfs_path *path,
goto not_found_em;
}
+ em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
em->start = extent_start;
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
- printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
- "[%llu %llu]\n", (unsigned long long)em->start,
- (unsigned long long)em->len,
- (unsigned long long)start,
- (unsigned long long)len);
+ btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
+ (unsigned long long)em->start,
+ (unsigned long long)em->len,
+ (unsigned long long)start,
+ (unsigned long long)len);
err = -EIO;
goto out;
}
err = 0;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
+ ret = add_extent_mapping(em_tree, em, 0);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
}
em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
- ins.offset, ins.offset, 0);
+ ins.offset, ins.offset, ins.offset, 0);
if (IS_ERR(em))
goto out;
* block must be cow'd
*/
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
- struct inode *inode, u64 offset, u64 len)
+ struct inode *inode, u64 offset, u64 *len,
+ u64 *orig_start, u64 *orig_block_len,
+ u64 *ram_bytes)
{
struct btrfs_path *path;
int ret;
disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
backref_offset = btrfs_file_extent_offset(leaf, fi);
+ *orig_start = key.offset - backref_offset;
+ *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (extent_end < offset + len) {
+ if (extent_end < offset + *len) {
/* extent doesn't include our full range, must cow */
goto out;
}
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
- num_bytes = min(offset + len, extent_end) - offset;
+ num_bytes = min(offset + *len, extent_end) - offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
*/
+ *len = num_bytes;
ret = 1;
out:
btrfs_free_path(path);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
- u64 orig_block_len, int type)
+ u64 orig_block_len, u64 ram_bytes,
+ int type)
{
struct extent_map_tree *em_tree;
struct extent_map *em;
em->block_start = block_start;
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->orig_block_len = orig_block_len;
+ em->ram_bytes = ram_bytes;
em->generation = -1;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
if (type == BTRFS_ORDERED_PREALLOC)
btrfs_drop_extent_cache(inode, em->start,
em->start + em->len - 1, 0);
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- if (!ret)
- list_move(&em->list,
- &em_tree->modified_extents);
+ ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
} while (ret == -EEXIST);
em->block_start != EXTENT_MAP_HOLE)) {
int type;
int ret;
- u64 block_start;
+ u64 block_start, orig_start, orig_block_len, ram_bytes;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
type = BTRFS_ORDERED_PREALLOC;
if (IS_ERR(trans))
goto must_cow;
- if (can_nocow_odirect(trans, inode, start, len) == 1) {
- u64 orig_start = em->orig_start;
- u64 orig_block_len = em->orig_block_len;
-
+ if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
+ &orig_block_len, &ram_bytes) == 1) {
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = create_pinned_em(inode, start, len,
orig_start,
block_start, len,
- orig_block_len, type);
+ orig_block_len,
+ ram_bytes, type);
if (IS_ERR(em)) {
btrfs_end_transaction(trans, root);
goto unlock_err;
goto failed;
local_irq_save(flags);
kaddr = kmap_atomic(page);
- csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
+ csum = btrfs_csum_data(kaddr + bvec->bv_offset,
csum, bvec->bv_len);
btrfs_csum_final(csum, (char *)&csum);
kunmap_atomic(kaddr);
flush_dcache_page(bvec->bv_page);
if (csum != private) {
failed:
- printk(KERN_ERR "btrfs csum failed ino %llu off"
- " %llu csum %u private %u\n",
- (unsigned long long)btrfs_ino(inode),
- (unsigned long long)start,
- csum, (unsigned)private);
+ btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u private %u",
+ (unsigned long long)btrfs_ino(inode),
+ (unsigned long long)start,
+ csum, (unsigned)private);
err = -EIO;
}
}
return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
-int btrfs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+static int btrfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
struct extent_io_tree *tree;
if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags)) {
- printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
- (unsigned long long)btrfs_ino(inode));
+ btrfs_info(root->fs_info, "inode %llu still on the orphan list",
+ (unsigned long long)btrfs_ino(inode));
atomic_dec(&root->orphan_inodes);
}
if (!ordered)
break;
else {
- printk(KERN_ERR "btrfs found ordered "
- "extent %llu %llu on inode cleanup\n",
- (unsigned long long)ordered->file_offset,
- (unsigned long long)ordered->len);
+ btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
+ (unsigned long long)ordered->file_offset,
+ (unsigned long long)ordered->len);
btrfs_remove_ordered_extent(inode, ordered);
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->orig_block_len = ins.offset;
+ em->ram_bytes = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
em->generation = trans->transid;
while (1) {
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
- if (!ret)
- list_move(&em->list,
- &em_tree->modified_extents);
+ ret = add_extent_mapping(em_tree, em, 1);
write_unlock(&em_tree->lock);
if (ret != -EEXIST)
break;
struct dentry *dentry;
int error;
- mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ if (error == -EINTR)
+ return error;
dentry = lookup_one_len(name, parent->dentry, namelen);
error = PTR_ERR(dentry);
u64 new_align = ~((u64)128 * 1024 - 1);
struct page **pages = NULL;
- if (extent_thresh == 0)
- extent_thresh = 256 * 1024;
+ if (isize == 0)
+ return 0;
+
+ if (range->start >= isize)
+ return -EINVAL;
if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
if (range->compress_type > BTRFS_COMPRESS_TYPES)
compress_type = range->compress_type;
}
- if (isize == 0)
- return 0;
+ if (extent_thresh == 0)
+ extent_thresh = 256 * 1024;
/*
* if we were not given a file, allocate a readahead
if (err)
goto out;
- mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ if (err == -EINTR)
+ goto out;
dentry = lookup_one_len(vol_args->name, parent, namelen);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
mutex_lock(&fs_devices->device_list_mutex);
dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
- mutex_unlock(&fs_devices->device_list_mutex);
if (!dev) {
ret = -ENODEV;
}
out:
+ mutex_unlock(&fs_devices->device_list_mutex);
if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
ret = -EFAULT;
}
}
-long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_space_args space_args;
struct btrfs_ioctl_space_info space;
goto drop_write;
}
- if (sa->cmd != BTRFS_QUOTA_CTL_RESCAN) {
- trans = btrfs_start_transaction(root, 2);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
+ down_write(&root->fs_info->subvol_sem);
+ trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
}
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_DISABLE:
ret = btrfs_quota_disable(trans, root->fs_info);
break;
- case BTRFS_QUOTA_CTL_RESCAN:
- ret = btrfs_quota_rescan(root->fs_info);
- break;
default:
ret = -EINVAL;
break;
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
- if (trans) {
- err = btrfs_commit_transaction(trans, root);
- if (err && !ret)
- ret = err;
- }
+ err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
+ if (err && !ret)
+ ret = err;
out:
kfree(sa);
+ up_write(&root->fs_info->subvol_sem);
drop_write:
mnt_drop_write_file(file);
return ret;
return ret;
}
+static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_ioctl_quota_rescan_args *qsa;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ qsa = memdup_user(arg, sizeof(*qsa));
+ if (IS_ERR(qsa)) {
+ ret = PTR_ERR(qsa);
+ goto drop_write;
+ }
+
+ if (qsa->flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_qgroup_rescan(root->fs_info);
+
+out:
+ kfree(qsa);
+drop_write:
+ mnt_drop_write_file(file);
+ return ret;
+}
+
+static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_ioctl_quota_rescan_args *qsa;
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ qsa = kzalloc(sizeof(*qsa), GFP_NOFS);
+ if (!qsa)
+ return -ENOMEM;
+
+ if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+ qsa->flags = 1;
+ qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
+ }
+
+ if (copy_to_user(arg, qsa, sizeof(*qsa)))
+ ret = -EFAULT;
+
+ kfree(qsa);
+ return ret;
+}
+
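From userspace the two new ioctls pair up: BTRFS_IOC_QUOTA_RESCAN starts a
rescan and BTRFS_IOC_QUOTA_RESCAN_STATUS reports progress. A hedged sketch of a
caller (file descriptor setup and error handling are assumed; the struct fields
match the ones used above) could look like:

	struct btrfs_ioctl_quota_rescan_args args;

	memset(&args, 0, sizeof(args));		/* flags must be 0 to start */
	if (ioctl(fd, BTRFS_IOC_QUOTA_RESCAN, &args) < 0)
		perror("BTRFS_IOC_QUOTA_RESCAN");

	/* flags == 1 in the status reply means a rescan is still running;
	 * progress is the objectid the scan has reached */
	if (ioctl(fd, BTRFS_IOC_QUOTA_RESCAN_STATUS, &args) == 0 && args.flags)
		printf("rescan at %llu\n", (unsigned long long)args.progress);
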
static long btrfs_ioctl_set_received_subvol(struct file *file,
void __user *arg)
{
return btrfs_ioctl_qgroup_create(file, argp);
case BTRFS_IOC_QGROUP_LIMIT:
return btrfs_ioctl_qgroup_limit(file, argp);
+ case BTRFS_IOC_QUOTA_RESCAN:
+ return btrfs_ioctl_quota_rescan(file, argp);
+ case BTRFS_IOC_QUOTA_RESCAN_STATUS:
+ return btrfs_ioctl_quota_rescan_status(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
case BTRFS_IOC_GET_FSLABEL:
#include "extent_io.h"
#include "locking.h"
-void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
+static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
/*
* if we currently have a spinning reader or writer lock
BUG_ON(!atomic_read(&eb->write_locks));
}
-void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
BUG_ON(!atomic_read(&eb->read_locks));
}
* be reclaimed before their checksum is actually put into the btree
*/
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
- u32 *sum)
+ u32 *sum, int len)
{
struct btrfs_ordered_sum *ordered_sum;
struct btrfs_sector_sum *sector_sums;
unsigned long num_sectors;
unsigned long i;
u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
- int ret = 1;
+ int index = 0;
ordered = btrfs_lookup_ordered_extent(inode, offset);
if (!ordered)
- return 1;
+ return 0;
spin_lock_irq(&tree->lock);
list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
- if (disk_bytenr >= ordered_sum->bytenr) {
- num_sectors = ordered_sum->len / sectorsize;
- sector_sums = ordered_sum->sums;
- for (i = 0; i < num_sectors; i++) {
+ if (disk_bytenr >= ordered_sum->bytenr &&
+ disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
+ i = (disk_bytenr - ordered_sum->bytenr) >>
+ inode->i_sb->s_blocksize_bits;
+ sector_sums = ordered_sum->sums + i;
+ num_sectors = ordered_sum->len >>
+ inode->i_sb->s_blocksize_bits;
+ for (; i < num_sectors; i++) {
if (sector_sums[i].bytenr == disk_bytenr) {
- *sum = sector_sums[i].sum;
- ret = 0;
- goto out;
+ sum[index] = sector_sums[i].sum;
+ index++;
+ if (index == len)
+ goto out;
+ disk_bytenr += sectorsize;
}
}
}
out:
spin_unlock_irq(&tree->lock);
btrfs_put_ordered_extent(ordered);
- return ret;
+ return index;
}
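
Note the changed contract: btrfs_find_ordered_sum() now copies up to 'len'
checksums into the caller's array and returns how many it found (0 when the
ordered extent has none for that disk_bytenr), rather than the old 0/1 result.
A minimal caller sketch, matching the lookup loop earlier in this series:

	u32 sums[16];
	int found;

	/* returns the number of checksums copied into sums[] */
	found = btrfs_find_ordered_sum(inode, offset, disk_bytenr, sums,
				       ARRAY_SIZE(sums));
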
u64 len);
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
-int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
+int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
+ u32 *sum, int len);
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int wait);
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
nr = btrfs_header_nritems(l);
- printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
+ btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d",
(unsigned long long)btrfs_header_bytenr(l), nr,
btrfs_leaf_free_space(root, l));
for (i = 0 ; i < nr ; i++) {
btrfs_print_leaf(root, c);
return;
}
- printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
- (unsigned long long)btrfs_header_bytenr(c),
- level, nr,
- (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+ btrfs_info(root->fs_info, "node %llu level %d total ptrs %d free spc %u",
+ (unsigned long long)btrfs_header_bytenr(c),
+ level, nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
#ifndef __PRINT_TREE_
#define __PRINT_TREE_
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *t);
+void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
#endif
#include "locking.h"
#include "ulist.h"
#include "backref.h"
+#include "extent_io.h"
/* TODO XXX FIXME
* - subvol delete -> delete when ref goes to 0? delete limits also?
* - reorganize keys
* - compressed
* - sync
- * - rescan
* - copy also limits on subvol creation
* - limit
* - caches fuer ulists
struct btrfs_qgroup *member;
};
-/* must be called with qgroup_lock held */
+struct qgroup_rescan {
+ struct btrfs_work work;
+ struct btrfs_fs_info *fs_info;
+};
+
+static void qgroup_rescan_start(struct btrfs_fs_info *fs_info,
+ struct qgroup_rescan *qscan);
+
+/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
u64 qgroupid)
{
}
fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
ptr);
- /* FIXME read scan element */
+ fs_info->qgroup_rescan_progress.objectid =
+ btrfs_qgroup_status_rescan(l, ptr);
+ if (fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+ struct qgroup_rescan *qscan =
+ kmalloc(sizeof(*qscan), GFP_NOFS);
+ if (!qscan) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fs_info->qgroup_rescan_progress.type = 0;
+ fs_info->qgroup_rescan_progress.offset = 0;
+ qgroup_rescan_start(fs_info, qscan);
+ }
goto next1;
}
qgroup = rb_entry(n, struct btrfs_qgroup, node);
rb_erase(n, &fs_info->qgroup_tree);
- WARN_ON(!list_empty(&qgroup->dirty));
-
while (!list_empty(&qgroup->groups)) {
list = list_first_entry(&qgroup->groups,
struct btrfs_qgroup_list,
ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
- /* XXX scan */
+ btrfs_set_qgroup_status_rescan(l, ptr,
+ fs_info->qgroup_rescan_progress.objectid);
btrfs_mark_buffer_dirty(l);
struct btrfs_fs_info *fs_info)
{
struct btrfs_root *quota_root;
+ struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_path *path = NULL;
struct btrfs_qgroup_status_item *ptr;
struct extent_buffer *leaf;
struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_qgroup *qgroup = NULL;
int ret = 0;
+ int slot;
- spin_lock(&fs_info->qgroup_lock);
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
if (fs_info->quota_root) {
fs_info->pending_quota_state = 1;
- spin_unlock(&fs_info->qgroup_lock);
goto out;
}
- spin_unlock(&fs_info->qgroup_lock);
/*
* initially create the quota tree
fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
- btrfs_set_qgroup_status_scan(leaf, ptr, 0);
+ btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
btrfs_mark_buffer_dirty(leaf);
+ key.objectid = 0;
+ key.type = BTRFS_ROOT_REF_KEY;
+ key.offset = 0;
+
+ btrfs_release_path(path);
+ ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
+ if (ret > 0)
+ goto out_add_root;
+ if (ret < 0)
+ goto out_free_path;
+
+
+ while (1) {
+ slot = path->slots[0];
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ if (found_key.type == BTRFS_ROOT_REF_KEY) {
+ ret = add_qgroup_item(trans, quota_root,
+ found_key.offset);
+ if (ret)
+ goto out_free_path;
+
+ qgroup = add_qgroup_rb(fs_info, found_key.offset);
+ if (IS_ERR(qgroup)) {
+ ret = PTR_ERR(qgroup);
+ goto out_free_path;
+ }
+ }
+ ret = btrfs_next_item(tree_root, path);
+ if (ret < 0)
+ goto out_free_path;
+ if (ret)
+ break;
+ }
+
+out_add_root:
+ btrfs_release_path(path);
+ ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
+ if (ret)
+ goto out_free_path;
+
+ qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
+ if (IS_ERR(qgroup)) {
+ ret = PTR_ERR(qgroup);
+ goto out_free_path;
+ }
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
fs_info->pending_quota_state = 1;
kfree(quota_root);
}
out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
struct btrfs_root *quota_root;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ if (!fs_info->quota_root)
+ goto out;
spin_lock(&fs_info->qgroup_lock);
- if (!fs_info->quota_root) {
- spin_unlock(&fs_info->qgroup_lock);
- return 0;
- }
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
quota_root = fs_info->quota_root;
btrfs_free_qgroup_config(fs_info);
spin_unlock(&fs_info->qgroup_lock);
- if (!quota_root)
- return -EINVAL;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = btrfs_clean_quota_tree(trans, quota_root);
if (ret)
free_extent_buffer(quota_root->commit_root);
kfree(quota_root);
out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
-int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
+static void qgroup_dirty(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *qgroup)
{
- /* FIXME */
- return 0;
+ if (list_empty(&qgroup->dirty))
+ list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
struct btrfs_root *quota_root;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup *member;
+ struct btrfs_qgroup_list *list;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
quota_root = fs_info->quota_root;
- if (!quota_root)
- return -EINVAL;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ member = find_qgroup_rb(fs_info, src);
+ parent = find_qgroup_rb(fs_info, dst);
+ if (!member || !parent) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* check if such a qgroup relation already exists */
+ list_for_each_entry(list, &member->groups, next_group) {
+ if (list->group == parent) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
ret = add_qgroup_relation_item(trans, quota_root, src, dst);
if (ret)
- return ret;
+ goto out;
ret = add_qgroup_relation_item(trans, quota_root, dst, src);
if (ret) {
del_qgroup_relation_item(trans, quota_root, src, dst);
- return ret;
+ goto out;
}
spin_lock(&fs_info->qgroup_lock);
ret = add_relation_rb(quota_root->fs_info, src, dst);
spin_unlock(&fs_info->qgroup_lock);
-
+out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
struct btrfs_root *quota_root;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup *member;
+ struct btrfs_qgroup_list *list;
int ret = 0;
int err;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
quota_root = fs_info->quota_root;
- if (!quota_root)
- return -EINVAL;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ member = find_qgroup_rb(fs_info, src);
+ parent = find_qgroup_rb(fs_info, dst);
+ if (!member || !parent) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* check if such a qgroup relation already exists */
+ list_for_each_entry(list, &member->groups, next_group) {
+ if (list->group == parent)
+ goto exist;
+ }
+ ret = -ENOENT;
+ goto out;
+exist:
ret = del_qgroup_relation_item(trans, quota_root, src, dst);
err = del_qgroup_relation_item(trans, quota_root, dst, src);
if (err && !ret)
spin_lock(&fs_info->qgroup_lock);
del_relation_rb(fs_info, src, dst);
-
spin_unlock(&fs_info->qgroup_lock);
-
+out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
struct btrfs_qgroup *qgroup;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
quota_root = fs_info->quota_root;
- if (!quota_root)
- return -EINVAL;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qgroup = find_qgroup_rb(fs_info, qgroupid);
+ if (qgroup) {
+ ret = -EEXIST;
+ goto out;
+ }
ret = add_qgroup_item(trans, quota_root, qgroupid);
+ if (ret)
+ goto out;
spin_lock(&fs_info->qgroup_lock);
qgroup = add_qgroup_rb(fs_info, qgroupid);
if (IS_ERR(qgroup))
ret = PTR_ERR(qgroup);
-
+out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
struct btrfs_qgroup *qgroup;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
quota_root = fs_info->quota_root;
- if (!quota_root)
- return -EINVAL;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
- /* check if there are no relations to this qgroup */
- spin_lock(&fs_info->qgroup_lock);
qgroup = find_qgroup_rb(fs_info, qgroupid);
- if (qgroup) {
- if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
- spin_unlock(&fs_info->qgroup_lock);
- return -EBUSY;
+ if (!qgroup) {
+ ret = -ENOENT;
+ goto out;
+ } else {
+ /* check if there are no relations to this qgroup */
+ if (!list_empty(&qgroup->groups) ||
+ !list_empty(&qgroup->members)) {
+ ret = -EBUSY;
+ goto out;
}
}
- spin_unlock(&fs_info->qgroup_lock);
-
ret = del_qgroup_item(trans, quota_root, qgroupid);
spin_lock(&fs_info->qgroup_lock);
del_qgroup_rb(quota_root->fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
-
+out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
struct btrfs_fs_info *fs_info, u64 qgroupid,
struct btrfs_qgroup_limit *limit)
{
- struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
- if (!quota_root)
- return -EINVAL;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ quota_root = fs_info->quota_root;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qgroup = find_qgroup_rb(fs_info, qgroupid);
+ if (!qgroup) {
+ ret = -ENOENT;
+ goto out;
+ }
ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
limit->flags, limit->max_rfer,
limit->max_excl, limit->rsv_rfer,
}
spin_lock(&fs_info->qgroup_lock);
-
- qgroup = find_qgroup_rb(fs_info, qgroupid);
- if (!qgroup) {
- ret = -ENOENT;
- goto unlock;
- }
qgroup->lim_flags = limit->flags;
qgroup->max_rfer = limit->max_rfer;
qgroup->max_excl = limit->max_excl;
qgroup->rsv_rfer = limit->rsv_rfer;
qgroup->rsv_excl = limit->rsv_excl;
-
-unlock:
spin_unlock(&fs_info->qgroup_lock);
-
+out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
-static void qgroup_dirty(struct btrfs_fs_info *fs_info,
- struct btrfs_qgroup *qgroup)
-{
- if (list_empty(&qgroup->dirty))
- list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
-}
-
/*
* btrfs_qgroup_record_ref is called when the ref is added or deleted. it puts
* the modification into a list that's later used by btrfs_end_transaction to
return 0;
}
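+
+/*
+ * Step 1 of ref accounting: for every root that referenced the extent
+ * before the change, walk its qgroup and all parent qgroups and record,
+ * relative to @seq, how many of those roots reach each qgroup (qg->refcnt).
+ */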
+static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
+ struct ulist *roots, struct ulist *tmp,
+ u64 seq)
+{
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ struct ulist_node *tmp_unode;
+ struct ulist_iterator tmp_uiter;
+ struct btrfs_qgroup *qg;
+ int ret;
+
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ulist_reinit(tmp);
+ /* XXX id not needed */
+ ret = ulist_add(tmp, qg->qgroupid,
+ (u64)(uintptr_t)qg, GFP_ATOMIC);
+ if (ret < 0)
+ return ret;
+ ULIST_ITER_INIT(&tmp_uiter);
+ while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
+ if (qg->refcnt < seq)
+ qg->refcnt = seq + 1;
+ else
+ ++qg->refcnt;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ret = ulist_add(tmp, glist->group->qgroupid,
+ (u64)(uintptr_t)glist->group,
+ GFP_ATOMIC);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
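+/*
+ * Step 2 of ref accounting: walk up from the qgroup of the root the ref is
+ * added to (or dropped from). Qgroups that were not reached in step 1 gain
+ * (or lose) the referenced bytes, and also the exclusive bytes if no other
+ * root references the extent. Every qgroup visited here is tagged with @seq
+ * so that step 3 can skip it.
+ */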
+static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
+ struct ulist *roots, struct ulist *tmp,
+ u64 seq, int sgn, u64 num_bytes,
+ struct btrfs_qgroup *qgroup)
+{
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+ int ret;
+
+ ulist_reinit(tmp);
+ ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
+ if (ret < 0)
+ return ret;
+
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(tmp, &uiter))) {
+ qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
+ if (qg->refcnt < seq) {
+ /* not visited by step 1 */
+ qg->rfer += sgn * num_bytes;
+ qg->rfer_cmpr += sgn * num_bytes;
+ if (roots->nnodes == 0) {
+ qg->excl += sgn * num_bytes;
+ qg->excl_cmpr += sgn * num_bytes;
+ }
+ qgroup_dirty(fs_info, qg);
+ }
+ WARN_ON(qg->tag >= seq);
+ qg->tag = seq;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ret = ulist_add(tmp, glist->group->qgroupid,
+ (uintptr_t)glist->group, GFP_ATOMIC);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
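+/*
+ * Step 3 of ref accounting: walk the old roots again. Qgroups that were not
+ * tagged in step 2 but are reachable from all of the old roots adjust their
+ * exclusive count, because the extent is now shared with (or, on a drop, no
+ * longer shared with) the changed root.
+ */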
+static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
+ struct ulist *roots, struct ulist *tmp,
+ u64 seq, int sgn, u64 num_bytes)
+{
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ struct btrfs_qgroup *qg;
+ struct ulist_node *tmp_unode;
+ struct ulist_iterator tmp_uiter;
+ int ret;
+
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ulist_reinit(tmp);
+ ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
+ if (ret < 0)
+ return ret;
+
+ ULIST_ITER_INIT(&tmp_uiter);
+ while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
+ if (qg->tag == seq)
+ continue;
+
+ if (qg->refcnt - seq == roots->nnodes) {
+ qg->excl -= sgn * num_bytes;
+ qg->excl_cmpr -= sgn * num_bytes;
+ qgroup_dirty(fs_info, qg);
+ }
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ret = ulist_add(tmp, glist->group->qgroupid,
+ (uintptr_t)glist->group,
+ GFP_ATOMIC);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
/*
* btrfs_qgroup_account_ref is called for every ref that is added to or deleted
* from the fs. First, all roots referencing the extent are searched, and
struct btrfs_root *quota_root;
u64 ref_root;
struct btrfs_qgroup *qgroup;
- struct ulist_node *unode;
struct ulist *roots = NULL;
struct ulist *tmp = NULL;
- struct ulist_iterator uiter;
u64 seq;
int ret = 0;
int sgn;
case BTRFS_ADD_DELAYED_REF:
case BTRFS_ADD_DELAYED_EXTENT:
sgn = 1;
+ seq = btrfs_tree_mod_seq_prev(node->seq);
break;
case BTRFS_DROP_DELAYED_REF:
sgn = -1;
+ seq = node->seq;
break;
case BTRFS_UPDATE_DELAYED_HEAD:
return 0;
BUG();
}
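+ /*
+ * If a rescan is in progress and has not yet passed this extent, skip
+ * the live accounting here; the rescan worker will account it when it
+ * reaches it. The check is repeated below, under the qgroup lock, after
+ * the backref walk.
+ */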
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+ if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
/*
* the delayed ref sequence number we pass depends on the direction of
- * the operation. for add operations, we pass (node->seq - 1) to skip
+ * the operation. for add operations, we pass
+ * btrfs_tree_mod_seq_prev(node->seq) to skip
* the delayed ref's current sequence number, because we need the state
* of the tree before the add operation. for delete operations, we pass
* (node->seq) to include the delayed ref's current sequence number,
* because we need the state of the tree after the delete operation.
*/
- ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
- sgn > 0 ? node->seq - 1 : node->seq, &roots);
+ ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
if (ret < 0)
return ret;
+ mutex_lock(&fs_info->qgroup_rescan_lock);
spin_lock(&fs_info->qgroup_lock);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+ if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
+ ret = 0;
+ goto unlock;
+ }
+ }
+
quota_root = fs_info->quota_root;
if (!quota_root)
goto unlock;
seq = fs_info->qgroup_seq;
fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(roots, &uiter))) {
- struct ulist_node *tmp_unode;
- struct ulist_iterator tmp_uiter;
- struct btrfs_qgroup *qg;
-
- qg = find_qgroup_rb(fs_info, unode->val);
- if (!qg)
- continue;
-
- ulist_reinit(tmp);
- /* XXX id not needed */
- ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
- ULIST_ITER_INIT(&tmp_uiter);
- while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
- struct btrfs_qgroup_list *glist;
-
- qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
- if (qg->refcnt < seq)
- qg->refcnt = seq + 1;
- else
- ++qg->refcnt;
-
- list_for_each_entry(glist, &qg->groups, next_group) {
- ulist_add(tmp, glist->group->qgroupid,
- (u64)(uintptr_t)glist->group,
- GFP_ATOMIC);
- }
- }
- }
+ ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
+ if (ret)
+ goto unlock;
/*
* step 2: walk from the new root
*/
- ulist_reinit(tmp);
- ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(tmp, &uiter))) {
- struct btrfs_qgroup *qg;
- struct btrfs_qgroup_list *glist;
-
- qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
- if (qg->refcnt < seq) {
- /* not visited by step 1 */
- qg->rfer += sgn * node->num_bytes;
- qg->rfer_cmpr += sgn * node->num_bytes;
- if (roots->nnodes == 0) {
- qg->excl += sgn * node->num_bytes;
- qg->excl_cmpr += sgn * node->num_bytes;
- }
- qgroup_dirty(fs_info, qg);
- }
- WARN_ON(qg->tag >= seq);
- qg->tag = seq;
-
- list_for_each_entry(glist, &qg->groups, next_group) {
- ulist_add(tmp, glist->group->qgroupid,
- (uintptr_t)glist->group, GFP_ATOMIC);
- }
- }
+ ret = qgroup_account_ref_step2(fs_info, roots, tmp, seq, sgn,
+ node->num_bytes, qgroup);
+ if (ret)
+ goto unlock;
/*
* step 3: walk again from old refs
*/
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(roots, &uiter))) {
- struct btrfs_qgroup *qg;
- struct ulist_node *tmp_unode;
- struct ulist_iterator tmp_uiter;
-
- qg = find_qgroup_rb(fs_info, unode->val);
- if (!qg)
- continue;
-
- ulist_reinit(tmp);
- ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
- ULIST_ITER_INIT(&tmp_uiter);
- while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
- struct btrfs_qgroup_list *glist;
-
- qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
- if (qg->tag == seq)
- continue;
-
- if (qg->refcnt - seq == roots->nnodes) {
- qg->excl -= sgn * node->num_bytes;
- qg->excl_cmpr -= sgn * node->num_bytes;
- qgroup_dirty(fs_info, qg);
- }
+ ret = qgroup_account_ref_step3(fs_info, roots, tmp, seq, sgn,
+ node->num_bytes);
+ if (ret)
+ goto unlock;
- list_for_each_entry(glist, &qg->groups, next_group) {
- ulist_add(tmp, glist->group->qgroupid,
- (uintptr_t)glist->group,
- GFP_ATOMIC);
- }
- }
- }
- ret = 0;
unlock:
spin_unlock(&fs_info->qgroup_lock);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
ulist_free(roots);
ulist_free(tmp);
{
struct btrfs_root *quota_root = fs_info->quota_root;
int ret = 0;
+ int start_rescan_worker = 0;
if (!quota_root)
goto out;
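+ /* quota is being switched on in this commit; kick off the initial rescan below */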
+ if (!fs_info->quota_enabled && fs_info->pending_quota_state)
+ start_rescan_worker = 1;
+
fs_info->quota_enabled = fs_info->pending_quota_state;
spin_lock(&fs_info->qgroup_lock);
if (ret)
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ if (!ret && start_rescan_worker) {
+ ret = btrfs_qgroup_rescan(fs_info);
+ if (ret)
+ pr_err("btrfs: start rescan quota failed: %d\n", ret);
+ ret = 0;
+ }
+
out:
return ret;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
u32 level_size = 0;
+ u64 nums;
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_enabled)
- return 0;
+ goto out;
- if (!quota_root)
- return -EINVAL;
+ if (!quota_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (inherit) {
+ i_qgroups = (u64 *)(inherit + 1);
+ nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
+ 2 * inherit->num_excl_copies;
+ for (i = 0; i < nums; ++i) {
+ srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
+ if (!srcgroup) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ++i_qgroups;
+ }
+ }
/*
* create a tracking group for the subvol itself
unlock:
spin_unlock(&fs_info->qgroup_lock);
out:
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
ret = -ENOMEM;
goto out;
}
- ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
+ ret = ulist_add(ulist, qgroup->qgroupid,
+ (uintptr_t)qgroup, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(ulist, &uiter))) {
struct btrfs_qgroup *qg;
qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
- qg->reserved + qg->rfer + num_bytes >
+ qg->reserved + (s64)qg->rfer + num_bytes >
qg->max_rfer) {
ret = -EDQUOT;
goto out;
}
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
- qg->reserved + qg->excl + num_bytes >
+ qg->reserved + (s64)qg->excl + num_bytes >
qg->max_excl) {
ret = -EDQUOT;
goto out;
}
list_for_each_entry(glist, &qg->groups, next_group) {
- ulist_add(ulist, glist->group->qgroupid,
- (uintptr_t)glist->group, GFP_ATOMIC);
+ ret = ulist_add(ulist, glist->group->qgroupid,
+ (uintptr_t)glist->group, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
}
}
-
+ ret = 0;
/*
* no limits exceeded, now record the reservation into all qgroups
*/
struct ulist_node *unode;
struct ulist_iterator uiter;
u64 ref_root = root->root_key.objectid;
+ int ret = 0;
if (!is_fstree(ref_root))
return;
btrfs_std_error(fs_info, -ENOMEM);
goto out;
}
- ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
+ ret = ulist_add(ulist, qgroup->qgroupid,
+ (uintptr_t)qgroup, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(ulist, &uiter))) {
struct btrfs_qgroup *qg;
qg->reserved -= num_bytes;
list_for_each_entry(glist, &qg->groups, next_group) {
- ulist_add(ulist, glist->group->qgroupid,
- (uintptr_t)glist->group, GFP_ATOMIC);
+ ret = ulist_add(ulist, glist->group->qgroupid,
+ (uintptr_t)glist->group, GFP_ATOMIC);
+ if (ret < 0)
+ goto out;
}
}
{
if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
return;
- printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
+ pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
- trans->delayed_ref_elem.seq);
+ (u32)(trans->delayed_ref_elem.seq >> 32),
+ (u32)trans->delayed_ref_elem.seq);
BUG();
}
+
+/*
+ * returns < 0 on error, 0 when more leaves are to be scanned.
+ * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
+ */
+static int
+qgroup_rescan_leaf(struct qgroup_rescan *qscan, struct btrfs_path *path,
+ struct btrfs_trans_handle *trans, struct ulist *tmp,
+ struct extent_buffer *scratch_leaf)
+{
+ struct btrfs_key found;
+ struct btrfs_fs_info *fs_info = qscan->fs_info;
+ struct ulist *roots = NULL;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ struct seq_list tree_mod_seq_elem = {};
+ u64 seq;
+ int slot;
+ int ret;
+
+ path->leave_spinning = 1;
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ ret = btrfs_search_slot_for_read(fs_info->extent_root,
+ &fs_info->qgroup_rescan_progress,
+ path, 1, 0);
+
+ pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
+ (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
+ fs_info->qgroup_rescan_progress.type,
+ (unsigned long long)fs_info->qgroup_rescan_progress.offset,
+ ret);
+
+ if (ret) {
+ /*
+ * The rescan is about to end; we will not be scanning any
+ * further blocks. We cannot unset the RESCAN flag here, because
+ * we want to commit the transaction if everything went well.
+ * To make the live accounting work in this phase, we set our
+ * scan progress pointer such that every real extent objectid
+ * will be smaller.
+ */
+ fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+ btrfs_release_path(path);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+ return ret;
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], &found,
+ btrfs_header_nritems(path->nodes[0]) - 1);
+ fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
+
+ btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+ memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
+ slot = path->slots[0];
+ btrfs_release_path(path);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+ for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
+ btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
+ if (found.type != BTRFS_EXTENT_ITEM_KEY)
+ continue;
+ ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
+ tree_mod_seq_elem.seq, &roots);
+ if (ret < 0)
+ goto out;
+ spin_lock(&fs_info->qgroup_lock);
+ seq = fs_info->qgroup_seq;
+ fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
+
+ ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
+ if (ret) {
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(roots);
+ goto out;
+ }
+
+ /*
+ * step 2 of btrfs_qgroup_account_ref works from a single root;
+ * here we do all of the roots at once.
+ */
+ ulist_reinit(tmp);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ struct btrfs_qgroup *qg;
+
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
+ GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(roots);
+ goto out;
+ }
+ }
+
+ /* this loop is similar to step 2 of btrfs_qgroup_account_ref */
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(tmp, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
+ qg->rfer += found.offset;
+ qg->rfer_cmpr += found.offset;
+ WARN_ON(qg->tag >= seq);
+ if (qg->refcnt - seq == roots->nnodes) {
+ qg->excl += found.offset;
+ qg->excl_cmpr += found.offset;
+ }
+ qgroup_dirty(fs_info, qg);
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ret = ulist_add(tmp, glist->group->qgroupid,
+ (uintptr_t)glist->group,
+ GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(roots);
+ goto out;
+ }
+ }
+ }
+
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(roots);
+ ret = 0;
+ }
+
+out:
+ btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+
+ return ret;
+}
+
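+/*
+ * Rescan worker: processes one extent tree leaf per transaction via
+ * qgroup_rescan_leaf() until the scan completes or fails, then clears the
+ * RESCAN status flag and, when the leaf scan reports a consistent result
+ * (err == 2), also clears the INCONSISTENT flag.
+ */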
+static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
+{
+ struct qgroup_rescan *qscan = container_of(work, struct qgroup_rescan,
+ work);
+ struct btrfs_path *path;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_fs_info *fs_info = qscan->fs_info;
+ struct ulist *tmp = NULL;
+ struct extent_buffer *scratch_leaf = NULL;
+ int err = -ENOMEM;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ goto out;
+ tmp = ulist_alloc(GFP_NOFS);
+ if (!tmp)
+ goto out;
+ scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
+ if (!scratch_leaf)
+ goto out;
+
+ err = 0;
+ while (!err) {
+ trans = btrfs_start_transaction(fs_info->fs_root, 0);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ break;
+ }
+ if (!fs_info->quota_enabled) {
+ err = -EINTR;
+ } else {
+ err = qgroup_rescan_leaf(qscan, path, trans,
+ tmp, scratch_leaf);
+ }
+ if (err > 0)
+ btrfs_commit_transaction(trans, fs_info->fs_root);
+ else
+ btrfs_end_transaction(trans, fs_info->fs_root);
+ }
+
+out:
+ kfree(scratch_leaf);
+ ulist_free(tmp);
+ btrfs_free_path(path);
+ kfree(qscan);
+
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+
+ if (err == 2 &&
+ fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ } else if (err < 0) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ }
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+ if (err >= 0) {
+ pr_info("btrfs: qgroup scan completed%s\n",
+ err == 2 ? " (inconsistency flag cleared)" : "");
+ } else {
+ pr_err("btrfs: qgroup scan failed with %d\n", err);
+ }
+}
+
+static void
+qgroup_rescan_start(struct btrfs_fs_info *fs_info, struct qgroup_rescan *qscan)
+{
+ memset(&qscan->work, 0, sizeof(qscan->work));
+ qscan->work.func = btrfs_qgroup_rescan_worker;
+ qscan->fs_info = fs_info;
+
+ pr_info("btrfs: qgroup scan started\n");
+ btrfs_queue_worker(&fs_info->qgroup_rescan_workers, &qscan->work);
+}
+
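+/*
+ * Start a rescan from the beginning of the extent tree: set the RESCAN
+ * status flag, reset the progress key, zero the counters of every qgroup
+ * and queue the rescan worker. Returns -EINPROGRESS if a rescan is already
+ * running and -EINVAL if quota is not enabled.
+ */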
+int
+btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+{
+ int ret = 0;
+ struct rb_node *n;
+ struct btrfs_qgroup *qgroup;
+ struct qgroup_rescan *qscan = kmalloc(sizeof(*qscan), GFP_NOFS);
+
+ if (!qscan)
+ return -ENOMEM;
+
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ spin_lock(&fs_info->qgroup_lock);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+ ret = -EINPROGRESS;
+ else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+ ret = -EINVAL;
+ if (ret) {
+ spin_unlock(&fs_info->qgroup_lock);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+ kfree(qscan);
+ return ret;
+ }
+
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ memset(&fs_info->qgroup_rescan_progress, 0,
+ sizeof(fs_info->qgroup_rescan_progress));
+
+ /* clear all current qgroup tracking information */
+ for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
+ qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ qgroup->rfer = 0;
+ qgroup->rfer_cmpr = 0;
+ qgroup->excl = 0;
+ qgroup->excl_cmpr = 0;
+ }
+ spin_unlock(&fs_info->qgroup_lock);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+ qgroup_rescan_start(fs_info, qscan);
+
+ return 0;
+}
/*
* remove everything in the cache
*/
-void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
+static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
struct btrfs_stripe_hash_table *table;
unsigned long flags;
* this will try to merge into existing bios if possible, and returns
* zero if all went well.
*/
-int rbio_add_io_page(struct btrfs_raid_bio *rbio,
- struct bio_list *bio_list,
- struct page *page,
- int stripe_nr,
- unsigned long page_index,
- unsigned long bio_max_len)
+static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
+ struct bio_list *bio_list,
+ struct page *page,
+ int stripe_nr,
+ unsigned long page_index,
+ unsigned long bio_max_len)
{
struct bio *last = bio_list->tail;
u64 last_end = 0;
while (atomic_read(&rc->elems)) {
wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
5 * HZ);
- dump_devs(rc->root->fs_info, rc->elems < 10 ? 1 : 0);
+ dump_devs(rc->root->fs_info,
+ atomic_read(&rc->elems) < 10 ? 1 : 0);
}
- dump_devs(rc->root->fs_info, rc->elems < 10 ? 1 : 0);
+ dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
kref_put(&rc->refcnt, reada_control_release);
return NULL;
}
-void backref_tree_panic(struct rb_node *rb_node, int errno,
- u64 bytenr)
+static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
struct btrfs_fs_info *fs_info = NULL;
int find_inline_backref(struct extent_buffer *leaf, int slot,
unsigned long *ptr, unsigned long *end)
{
+ struct btrfs_key key;
struct btrfs_extent_item *ei;
struct btrfs_tree_block_info *bi;
u32 item_size;
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
WARN_ON(!(btrfs_extent_flags(leaf, ei) &
BTRFS_EXTENT_FLAG_TREE_BLOCK));
- if (item_size <= sizeof(*ei) + sizeof(*bi)) {
+ if (key.type == BTRFS_EXTENT_ITEM_KEY &&
+ item_size <= sizeof(*ei) + sizeof(*bi)) {
WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
return 1;
}
- bi = (struct btrfs_tree_block_info *)(ei + 1);
- *ptr = (unsigned long)(bi + 1);
+ if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+ bi = (struct btrfs_tree_block_info *)(ei + 1);
+ *ptr = (unsigned long)(bi + 1);
+ } else {
+ *ptr = (unsigned long)(ei + 1);
+ }
*end = (unsigned long)ei + item_size;
return 0;
}
end = 0;
ptr = 0;
key.objectid = cur->bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.type = BTRFS_METADATA_ITEM_KEY;
key.offset = (u64)-1;
path1->search_commit_root = 1;
break;
}
- if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+ if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY) {
ret = find_inline_backref(eb, path1->slots[0],
&ptr, &end);
if (ret)
eb = read_tree_block(dest, old_bytenr, blocksize,
old_ptr_gen);
- BUG_ON(!eb);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ ret = (!eb) ? -ENOMEM : -EIO;
+ free_extent_buffer(eb);
+ return ret;
+ }
btrfs_tree_lock(eb);
if (cow) {
ret = btrfs_cow_block(trans, dest, eb, parent,
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
blocksize = btrfs_level_size(root, i - 1);
eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ return -EIO;
+ }
BUG_ON(btrfs_header_level(eb) != i - 1);
path->nodes[i - 1] = eb;
path->slots[i - 1] = 0;
blocksize = btrfs_level_size(root, node->level);
generation = btrfs_node_ptr_generation(upper->eb, slot);
eb = read_tree_block(root, bytenr, blocksize, generation);
- if (!eb) {
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
err = -EIO;
goto next;
}
BUG_ON(block->key_ready);
eb = read_tree_block(rc->extent_root, block->bytenr,
block->key.objectid, block->key.offset);
- BUG_ON(!eb);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ return -EIO;
+ }
WARN_ON(btrfs_header_level(eb) != block->level);
if (block->level == 0)
btrfs_item_key_to_cpu(eb, &block->key, 0);
struct tree_block *block)
{
BUG_ON(block->key_ready);
- readahead_tree_block(rc->extent_root, block->bytenr,
- block->key.objectid, block->key.offset);
+ if (block->key.type == BTRFS_METADATA_ITEM_KEY)
+ readahead_tree_block(rc->extent_root, block->bytenr,
+ block->key.objectid,
+ rc->extent_root->leafsize);
+ else
+ readahead_tree_block(rc->extent_root, block->bytenr,
+ block->key.objectid, block->key.offset);
return 0;
}
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
- goto out_path;
+ goto out_free_blocks;
}
rb_node = rb_first(blocks);
rb_node = rb_first(blocks);
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
- if (!block->key_ready)
- get_tree_block_key(rc, block);
+ if (!block->key_ready) {
+ err = get_tree_block_key(rc, block);
+ if (err)
+ goto out_free_path;
+ }
rb_node = rb_next(rb_node);
}
out:
err = finish_pending_nodes(trans, rc, path, err);
+out_free_path:
btrfs_free_path(path);
-out_path:
+out_free_blocks:
free_block_list(blocks);
return err;
}
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
while (1) {
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
+ ret = add_extent_mapping(em_tree, em, 0);
write_unlock(&em_tree->lock);
if (ret != -EEXIST) {
free_extent_map(em);
eb = path->nodes[0];
item_size = btrfs_item_size_nr(eb, path->slots[0]);
- if (item_size >= sizeof(*ei) + sizeof(*bi)) {
+ if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
+ item_size >= sizeof(*ei) + sizeof(*bi)) {
ei = btrfs_item_ptr(eb, path->slots[0],
struct btrfs_extent_item);
- bi = (struct btrfs_tree_block_info *)(ei + 1);
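+ /*
+ * Skinny metadata items (BTRFS_METADATA_ITEM_KEY) carry the block
+ * level in the key offset and have no btrfs_tree_block_info
+ * following the extent item.
+ */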
+ if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
+ bi = (struct btrfs_tree_block_info *)(ei + 1);
+ level = btrfs_tree_block_level(eb, bi);
+ } else {
+ level = (int)extent_key->offset;
+ }
generation = btrfs_extent_generation(eb, ei);
- level = btrfs_tree_block_level(eb, bi);
} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
u64 ref_owner;
return -ENOMEM;
block->bytenr = extent_key->objectid;
- block->key.objectid = extent_key->offset;
+ block->key.objectid = rc->extent_root->leafsize;
block->key.offset = generation;
block->level = level;
block->key_ready = 0;
ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
- BUG_ON(ret);
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if (ret > 0) {
+ if (key.objectid == bytenr &&
+ key.type == BTRFS_METADATA_ITEM_KEY)
+ ret = 0;
+ }
+ BUG_ON(ret);
+
ret = add_tree_block(rc, &key, path, blocks);
out:
btrfs_free_path(path);
return 1;
ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
- eb->start, eb->len, NULL, &flags);
+ eb->start, btrfs_header_level(eb), 1,
+ NULL, &flags);
BUG_ON(ret);
if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
break;
}
- if (key.type != BTRFS_EXTENT_ITEM_KEY ||
+ if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+ key.type != BTRFS_METADATA_ITEM_KEY) {
+ path->slots[0]++;
+ goto next;
+ }
+
+ if (key.type == BTRFS_EXTENT_ITEM_KEY &&
key.objectid + key.offset <= rc->search_start) {
path->slots[0]++;
goto next;
}
+ if (key.type == BTRFS_METADATA_ITEM_KEY &&
+ key.objectid + rc->extent_root->leafsize <=
+ rc->search_start) {
+ path->slots[0]++;
+ goto next;
+ }
+
ret = find_first_extent_bit(&rc->processed_blocks,
key.objectid, &start, &end,
EXTENT_DIRTY, NULL);
btrfs_release_path(path);
rc->search_start = end + 1;
} else {
- rc->search_start = key.objectid + key.offset;
+ if (key.type == BTRFS_EXTENT_ITEM_KEY)
+ rc->search_start = key.objectid + key.offset;
+ else
+ rc->search_start = key.objectid +
+ rc->extent_root->leafsize;
memcpy(extent_key, &key, sizeof(key));
return 0;
}
while (1) {
mutex_lock(&fs_info->cleaner_mutex);
-
- btrfs_clean_old_snapshots(fs_info->tree_root);
ret = relocate_block_group(rc);
-
mutex_unlock(&fs_info->cleaner_mutex);
if (ret < 0) {
err = ret;
* generation numbers as then we know the root was once mounted with an older
* kernel that was not aware of the root item structure change.
*/
-void btrfs_read_root_item(struct btrfs_root *root,
- struct extent_buffer *eb, int slot,
- struct btrfs_root_item *item)
+void btrfs_read_root_item(struct extent_buffer *eb, int slot,
+ struct btrfs_root_item *item)
{
uuid_le uuid;
int len;
goto out;
}
if (item)
- btrfs_read_root_item(root, l, slot, item);
+ btrfs_read_root_item(l, slot, item);
if (key)
memcpy(key, &found_key, sizeof(found_key));
int page_num;
u8 calculated_csum[BTRFS_CSUM_SIZE];
u32 crc = ~(u32)0;
- struct btrfs_root *root = fs_info->extent_root;
void *mapped_buffer;
WARN_ON(!sblock->pagev[0]->page);
for (page_num = 0;;) {
if (page_num == 0 && is_metadata)
- crc = btrfs_csum_data(root,
+ crc = btrfs_csum_data(
((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
else
- crc = btrfs_csum_data(root, mapped_buffer, crc,
- PAGE_SIZE);
+ crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
kunmap_atomic(mapped_buffer);
page_num++;
void *buffer;
u32 crc = ~(u32)0;
int fail = 0;
- struct btrfs_root *root = sctx->dev_root;
u64 len;
int index;
for (;;) {
u64 l = min_t(u64, len, PAGE_SIZE);
- crc = btrfs_csum_data(root, buffer, crc, l);
+ crc = btrfs_csum_data(buffer, crc, l);
kunmap_atomic(buffer);
len -= l;
if (len == 0)
for (;;) {
u64 l = min_t(u64, len, mapped_size);
- crc = btrfs_csum_data(root, p, crc, l);
+ crc = btrfs_csum_data(p, crc, l);
kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
for (;;) {
u64 l = min_t(u64, len, mapped_size);
- crc = btrfs_csum_data(root, p, crc, l);
+ crc = btrfs_csum_data(p, crc, l);
kunmap_atomic(mapped_buffer);
len -= l;
if (len == 0)
u64 flags;
int ret;
int slot;
- int i;
u64 nstripes;
struct extent_buffer *l;
struct btrfs_key key;
u64 physical;
u64 logical;
+ u64 logic_end;
u64 generation;
int mirror_num;
struct reada_control *reada1;
u64 extent_len;
struct btrfs_device *extent_dev;
int extent_mirror_num;
+ int stop_loop;
if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6)) {
key_start.type = BTRFS_EXTENT_ITEM_KEY;
key_start.offset = (u64)0;
key_end.objectid = base + offset + nstripes * increment;
- key_end.type = BTRFS_EXTENT_ITEM_KEY;
- key_end.offset = (u64)0;
+ key_end.type = BTRFS_METADATA_ITEM_KEY;
+ key_end.offset = (u64)-1;
reada1 = btrfs_reada_add(root, &key_start, &key_end);
key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
*/
logical = base + offset;
physical = map->stripes[num].physical;
+ logic_end = logical + increment * nstripes;
ret = 0;
- for (i = 0; i < nstripes; ++i) {
+ while (logical < logic_end) {
/*
* canceled?
*/
wake_up(&fs_info->scrub_pause_wait);
}
- ret = btrfs_lookup_csums_range(csum_root, logical,
- logical + map->stripe_len - 1,
- &sctx->csum_list, 1);
- if (ret)
- goto out;
-
key.objectid = logical;
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = (u64)0;
+ key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
+
if (ret > 0) {
ret = btrfs_previous_item(root, path, 0,
BTRFS_EXTENT_ITEM_KEY);
}
}
+ stop_loop = 0;
while (1) {
+ u64 bytes;
+
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
if (ret < 0)
goto out;
+ stop_loop = 1;
break;
}
btrfs_item_key_to_cpu(l, &key, slot);
- if (key.objectid + key.offset <= logical)
- goto next;
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ bytes = root->leafsize;
+ else
+ bytes = key.offset;
- if (key.objectid >= logical + map->stripe_len)
- break;
+ if (key.objectid + bytes <= logical)
+ goto next;
- if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
+ if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+ key.type != BTRFS_METADATA_ITEM_KEY)
goto next;
+ if (key.objectid >= logical + map->stripe_len) {
+ /* out of this device extent */
+ if (key.objectid >= logic_end)
+ stop_loop = 1;
+ break;
+ }
+
extent = btrfs_item_ptr(l, slot,
struct btrfs_extent_item);
flags = btrfs_extent_flags(l, extent);
goto next;
}
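+ /*
+ * An extent item may span more than one stripe of this device
+ * extent; scrub it one stripe-sized slice at a time, looping back
+ * here for each slice.
+ */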
+again:
+ extent_logical = key.objectid;
+ extent_len = bytes;
+
/*
* trim extent to this stripe
*/
- if (key.objectid < logical) {
- key.offset -= logical - key.objectid;
- key.objectid = logical;
+ if (extent_logical < logical) {
+ extent_len -= logical - extent_logical;
+ extent_logical = logical;
}
- if (key.objectid + key.offset >
+ if (extent_logical + extent_len >
logical + map->stripe_len) {
- key.offset = logical + map->stripe_len -
- key.objectid;
+ extent_len = logical + map->stripe_len -
+ extent_logical;
}
- extent_logical = key.objectid;
- extent_physical = key.objectid - logical + physical;
- extent_len = key.offset;
+ extent_physical = extent_logical - logical + physical;
extent_dev = scrub_dev;
extent_mirror_num = mirror_num;
if (is_dev_replace)
extent_len, &extent_physical,
&extent_dev,
&extent_mirror_num);
+
+ ret = btrfs_lookup_csums_range(csum_root, logical,
+ logical + map->stripe_len - 1,
+ &sctx->csum_list, 1);
+ if (ret)
+ goto out;
+
ret = scrub_extent(sctx, extent_logical, extent_len,
extent_physical, extent_dev, flags,
generation, extent_mirror_num,
- key.objectid - logical + physical);
+ extent_physical);
if (ret)
goto out;
+ if (extent_logical + extent_len <
+ key.objectid + bytes) {
+ logical += increment;
+ physical += map->stripe_len;
+
+ if (logical < key.objectid + bytes) {
+ cond_resched();
+ goto again;
+ }
+
+ if (logical >= logic_end) {
+ stop_loop = 1;
+ break;
+ }
+ }
next:
path->slots[0]++;
}
logical += increment;
physical += map->stripe_len;
spin_lock(&sctx->stat_lock);
- sctx->stat.last_physical = physical;
+ if (stop_loop)
+ sctx->stat.last_physical = map->stripes[num].physical +
+ length;
+ else
+ sctx->stat.last_physical = physical;
spin_unlock(&sctx->stat_lock);
+ if (stop_loop)
+ break;
}
out:
/* push queued extents */
return 0;
}
-int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_device *dev;
- int ret;
-
- /*
- * we have to hold the device_list_mutex here so the device
- * does not go away in cancel_dev. FIXME: find a better solution
- */
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
- dev = btrfs_find_device(fs_info, devid, NULL, NULL);
- if (!dev) {
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- return -ENODEV;
- }
- ret = btrfs_scrub_cancel_dev(fs_info, dev);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-
- return ret;
-}
-
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
struct btrfs_scrub_progress *progress)
{
return path;
}
-int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
+static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
int ret;
mm_segment_t old_fs;
struct send_ctx *sctx = ctx;
char *found_data = NULL;
int found_data_len = 0;
- struct fs_path *p = NULL;
ret = find_xattr(sctx, sctx->parent_root, sctx->right_path,
sctx->cmp_key, name, name_len, &found_data,
}
kfree(found_data);
- fs_path_free(sctx, p);
return ret;
}
{
int ret;
- ret = send_header(sctx);
- if (ret < 0)
- goto out;
+ if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
+ ret = send_header(sctx);
+ if (ret < 0)
+ goto out;
+ }
ret = send_subvol_begin(sctx);
if (ret < 0)
goto out;
}
- if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) {
+ if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
ret = -EINVAL;
goto out;
}
sctx->flags = arg->flags;
sctx->send_filp = fget(arg->send_fd);
- if (IS_ERR(sctx->send_filp)) {
- ret = PTR_ERR(sctx->send_filp);
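+ /* fget() returns NULL on failure, not an ERR_PTR */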
+ if (!sctx->send_filp) {
+ ret = -EBADF;
goto out;
}
if (ret < 0)
goto out;
- ret = begin_cmd(sctx, BTRFS_SEND_C_END);
- if (ret < 0)
- goto out;
- ret = send_cmd(sctx);
- if (ret < 0)
- goto out;
+ if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
+ ret = begin_cmd(sctx, BTRFS_SEND_C_END);
+ if (ret < 0)
+ goto out;
+ ret = send_cmd(sctx);
+ if (ret < 0)
+ goto out;
+ }
out:
kfree(arg);
#ifdef __KERNEL__
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
-int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off);
#endif
#include "compression.h"
#include "rcu-string.h"
#include "dev-replace.h"
+#include "free-space-cache.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
static const struct super_operations btrfs_super_ops;
static struct file_system_type btrfs_fs_type;
-static const char *btrfs_decode_error(int errno, char nbuf[16])
+static const char *btrfs_decode_error(int errno)
{
- char *errstr = NULL;
+ char *errstr = "unknown";
switch (errno) {
case -EIO:
case -EEXIST:
errstr = "Object already exists";
break;
- default:
- if (nbuf) {
- if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
- errstr = nbuf;
- }
+ case -ENOSPC:
+ errstr = "No space left";
+ break;
+ case -ENOENT:
+ errstr = "No such entry";
break;
}
return errstr;
}
-static void __save_error_info(struct btrfs_fs_info *fs_info)
+static void save_error_info(struct btrfs_fs_info *fs_info)
{
/*
* today we only save the error info into ram. Long term we'll
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}
-static void save_error_info(struct btrfs_fs_info *fs_info)
-{
- __save_error_info(fs_info);
-}
-
/* btrfs handle error by forcing the filesystem readonly */
static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
{
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
sb->s_flags |= MS_RDONLY;
- printk(KERN_INFO "btrfs is forced readonly\n");
+ btrfs_info(fs_info, "forced readonly");
/*
* Note that a running device replace operation is not
* canceled here although there is no way to update
* mounted writeable again, the device replace
* operation continues.
*/
-// WARN_ON(1);
}
}
unsigned int line, int errno, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
- char nbuf[16];
const char *errstr;
/*
if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
return;
- errstr = btrfs_decode_error(errno, nbuf);
+ errstr = btrfs_decode_error(errno);
if (fmt) {
struct va_format vaf;
va_list args;
vaf.fmt = fmt;
vaf.va = &args;
- printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
- sb->s_id, function, line, errstr, &vaf);
+ printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: errno=%d %s (%pV)\n",
+ sb->s_id, function, line, errno, errstr, &vaf);
va_end(args);
} else {
- printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
- sb->s_id, function, line, errstr);
+ printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: errno=%d %s\n",
+ sb->s_id, function, line, errno, errstr);
}
/* Don't go through full error handling during mount */
- if (sb->s_flags & MS_BORN) {
- save_error_info(fs_info);
+ save_error_info(fs_info);
+ if (sb->s_flags & MS_BORN)
btrfs_handle_error(fs_info);
- }
}
static const char * const logtypes[] = {
"debug",
};
-void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
char lvl[4];
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
+ printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
va_end(args);
}
struct btrfs_root *root, const char *function,
unsigned int line, int errno)
{
- WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted\n");
+ /*
+ * Report first abort since mount
+ */
+ if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,
+ &root->fs_info->fs_state)) {
+ WARN(1, KERN_DEBUG "btrfs: Transaction aborted (error %d)\n",
+ errno);
+ }
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
if (!trans->blocks_used) {
- char nbuf[16];
const char *errstr;
- errstr = btrfs_decode_error(errno, nbuf);
- btrfs_printk(root->fs_info,
- "%s:%d: Aborting unused transaction(%s).\n",
- function, line, errstr);
+ errstr = btrfs_decode_error(errno);
+ btrfs_warn(root->fs_info,
+ "%s:%d: Aborting unused transaction(%s).",
+ function, line, errstr);
return;
}
ACCESS_ONCE(trans->transaction->aborted) = errno;
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...)
{
- char nbuf[16];
char *s_id = "<unknown>";
const char *errstr;
struct va_format vaf = { .fmt = fmt };
va_start(args, fmt);
vaf.va = &args;
- errstr = btrfs_decode_error(errno, nbuf);
+ errstr = btrfs_decode_error(errno);
if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
- panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
- s_id, function, line, &vaf, errstr);
+ panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
+ s_id, function, line, &vaf, errno, errstr);
- printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
- s_id, function, line, &vaf, errstr);
+ printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
+ s_id, function, line, &vaf, errno, errstr);
va_end(args);
/* Caller calls BUG() */
}
*/
static int btrfs_parse_early_options(const char *options, fmode_t flags,
void *holder, char **subvol_name, u64 *subvol_objectid,
- u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
+ struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *device_name, *opts, *orig, *p;
}
break;
case Opt_subvolrootid:
- intarg = 0;
- error = match_int(&args[0], &intarg);
- if (!error) {
- /* we want the original fs_tree */
- if (!intarg)
- *subvol_rootid =
- BTRFS_FS_TREE_OBJECTID;
- else
- *subvol_rootid = intarg;
- }
+ printk(KERN_WARNING
+ "btrfs: 'subvolrootid' mount option is deprecated and has no effect\n");
break;
case Opt_device:
device_name = match_strdup(&args[0]);
return 0;
}
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_wait_ordered_extents(root, 1);
trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
fmode_t mode = FMODE_READ;
char *subvol_name = NULL;
u64 subvol_objectid = 0;
- u64 subvol_rootid = 0;
int error = 0;
if (!(flags & MS_RDONLY))
error = btrfs_parse_early_options(data, mode, fs_type,
&subvol_name, &subvol_objectid,
- &subvol_rootid, &fs_devices);
+ &fs_devices);
if (error) {
kfree(subvol_name);
return ERR_PTR(error);
new_pool_size);
}
-static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info,
- unsigned long old_opts, int flags)
+static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
{
set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
+ unsigned long old_opts, int flags)
+{
if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
(!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
(flags & MS_RDONLY))) {
unsigned int old_metadata_ratio = fs_info->metadata_ratio;
int ret;
- btrfs_remount_prepare(fs_info, old_opts, *flags);
+ btrfs_remount_prepare(fs_info);
ret = btrfs_parse_options(root, data);
if (ret) {
goto restore;
}
+ btrfs_remount_begin(fs_info, old_opts, *flags);
btrfs_resize_thread_pool(fs_info,
fs_info->thread_pool_size, old_thread_pool_size);
btrfs_init_lockdep();
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ btrfs_test_free_space_cache();
+#endif
+
printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
return 0;
#define BTRFS_ROOT_TRANS_TAG 0
-void put_transaction(struct btrfs_transaction *transaction)
+static void put_transaction(struct btrfs_transaction *transaction)
{
WARN_ON(atomic_read(&transaction->use_count) == 0);
if (atomic_dec_and_test(&transaction->use_count)) {
if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
"creating a fresh transaction\n");
- atomic_set(&fs_info->tree_mod_seq, 0);
+ atomic64_set(&fs_info->tree_mod_seq, 0);
spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- int ret;
-
- ret = __btrfs_end_transaction(trans, root, 0);
- if (ret)
- return ret;
- return 0;
+ return __btrfs_end_transaction(trans, root, 0);
}
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- int ret;
-
- ret = __btrfs_end_transaction(trans, root, 1);
- if (ret)
- return ret;
- return 0;
+ return __btrfs_end_transaction(trans, root, 1);
}
int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
int btrfs_add_dead_root(struct btrfs_root *root)
{
spin_lock(&root->fs_info->trans_lock);
- list_add(&root->root_list, &root->fs_info->dead_roots);
+ list_add_tail(&root->root_list, &root->fs_info->dead_roots);
spin_unlock(&root->fs_info->trans_lock);
return 0;
}
memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
memcpy(new_root_item->parent_uuid, root->root_item.uuid,
BTRFS_UUID_SIZE);
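+ /*
+ * A writable snapshot does not inherit the received-subvolume
+ * metadata (received_uuid, stime/rtime, stransid/rtransid) from the
+ * source subvolume.
+ */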
+ if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
+ memset(new_root_item->received_uuid, 0,
+ sizeof(new_root_item->received_uuid));
+ memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
+ memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
+ btrfs_set_root_stransid(new_root_item, 0);
+ btrfs_set_root_rtransid(new_root_item, 0);
+ }
new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
btrfs_set_root_otransid(new_root_item, trans->transid);
- memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
- memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
- btrfs_set_root_stransid(new_root_item, 0);
- btrfs_set_root_rtransid(new_root_item, 0);
old = btrfs_lock_root_node(root);
ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
current->journal_info = NULL;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+ spin_lock(&root->fs_info->trans_lock);
+ root->fs_info->trans_no_join = 0;
+ spin_unlock(&root->fs_info->trans_lock);
}
static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
ret = btrfs_write_and_wait_transaction(trans, root);
if (ret) {
btrfs_error(root->fs_info, ret,
- "Error while writing out transaction.");
+ "Error while writing out transaction");
mutex_unlock(&root->fs_info->tree_log_mutex);
goto cleanup_transaction;
}
btrfs_qgroup_free(root, trans->qgroup_reserved);
trans->qgroup_reserved = 0;
}
- btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
-// WARN_ON(1);
+ btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
if (current->journal_info == trans)
current->journal_info = NULL;
cleanup_transaction(trans, root, ret);
}
/*
- * interface function to delete all the snapshots we have scheduled for deletion
+ * return < 0 if error
+ * 0 if there are no more dead_roots at the time of call
+ * 1 there are more to be processed, call me again
+ *
+ * A return value of 1 means there are certainly more snapshots to delete, but
+ * if a new one arrives while we are processing, this may still return 0. That
+ * is fine, because btrfs_commit_super will poke the cleaner thread and it will
+ * process the new snapshot a few seconds later.
*/
-int btrfs_clean_old_snapshots(struct btrfs_root *root)
+int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
- LIST_HEAD(list);
+ int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
+ if (fs_info->sb->s_flags & MS_RDONLY) {
+ pr_debug("btrfs: cleaner called for RO fs!\n");
+ return 0;
+ }
+
spin_lock(&fs_info->trans_lock);
- list_splice_init(&fs_info->dead_roots, &list);
+ if (list_empty(&fs_info->dead_roots)) {
+ spin_unlock(&fs_info->trans_lock);
+ return 0;
+ }
+ root = list_first_entry(&fs_info->dead_roots,
+ struct btrfs_root, root_list);
+ list_del(&root->root_list);
spin_unlock(&fs_info->trans_lock);
- while (!list_empty(&list)) {
- int ret;
-
- root = list_entry(list.next, struct btrfs_root, root_list);
- list_del(&root->root_list);
+ pr_debug("btrfs: cleaner removing %llu\n",
+ (unsigned long long)root->objectid);
- btrfs_kill_all_delayed_nodes(root);
+ btrfs_kill_all_delayed_nodes(root);
- if (btrfs_header_backref_rev(root->node) <
- BTRFS_MIXED_BACKREF_REV)
- ret = btrfs_drop_snapshot(root, NULL, 0, 0);
- else
- ret =btrfs_drop_snapshot(root, NULL, 1, 0);
- BUG_ON(ret < 0);
- }
- return 0;
+ if (btrfs_header_backref_rev(root->node) <
+ BTRFS_MIXED_BACKREF_REV)
+ ret = btrfs_drop_snapshot(root, NULL, 0, 0);
+ else
+ ret = btrfs_drop_snapshot(root, NULL, 1, 0);
+ /*
+ * If we encounter a transaction abort during snapshot cleaning, we
+ * don't want to crash here
+ */
+ BUG_ON(ret < 0 && ret != -EAGAIN && ret != -EROFS);
+ return 1;
}
int btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
-int btrfs_clean_old_snapshots(struct btrfs_root *root);
+int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
-void put_transaction(struct btrfs_transaction *transaction);
#endif
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
+ int ret = 0;
+
if (wc->pin)
- btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
- eb->start, eb->len);
+ ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
+ eb->start, eb->len);
- if (btrfs_buffer_uptodate(eb, gen, 0)) {
+ if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
btrfs_wait_tree_block_writeback(eb);
}
- return 0;
+ return ret;
}
/*
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
- btrfs_truncate_item(trans, root, path, item_size, 1);
+ btrfs_truncate_item(root, path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(trans, root, path,
+ btrfs_extend_item(root, path,
item_size - found_size);
} else if (ret) {
return ret;
/* drop any overlapping extents */
ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
ret = btrfs_insert_empty_item(trans, root, path, key,
sizeof(*item));
- BUG_ON(ret);
+ if (ret)
+ goto out;
dest_offset = btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]);
copy_extent_buffer(path->nodes[0], eb, dest_offset,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset, 0);
- BUG_ON(ret);
+ if (ret)
+ goto out;
} else {
/*
* insert the extent pointer in the extent
ret = btrfs_alloc_logged_file_extent(trans,
root, root->root_key.objectid,
key->objectid, offset, &ins);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
btrfs_release_path(path);
ret = btrfs_lookup_csums_range(root->log_root,
csum_start, csum_end - 1,
&ordered_sums, 0);
- BUG_ON(ret);
+ if (ret)
+ goto out;
while (!list_empty(&ordered_sums)) {
struct btrfs_ordered_sum *sums;
sums = list_entry(ordered_sums.next,
struct btrfs_ordered_sum,
list);
- ret = btrfs_csum_file_blocks(trans,
+ if (!ret)
+ ret = btrfs_csum_file_blocks(trans,
root->fs_info->csum_root,
sums);
- BUG_ON(ret);
list_del(&sums->list);
kfree(sums);
}
+ if (ret)
+ goto out;
} else {
btrfs_release_path(path);
}
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
/* inline extents are easy, we just overwrite them */
ret = overwrite_item(trans, root, path, eb, slot, key);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
inode_add_bytes(inode, nbytes);
inode = read_one_inode(root, location.objectid);
if (!inode) {
- kfree(name);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
ret = link_to_fixup_dir(trans, root, path, location.objectid);
- BUG_ON(ret);
+ if (ret)
+ goto out;
ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
- BUG_ON(ret);
+ if (ret)
+ goto out;
+ btrfs_run_delayed_items(trans, root);
+out:
kfree(name);
-
iput(inode);
-
- btrfs_run_delayed_items(trans, root);
return ret;
}
victim_name_len = btrfs_inode_ref_name_len(leaf,
victim_ref);
victim_name = kmalloc(victim_name_len, GFP_NOFS);
- BUG_ON(!victim_name);
+ if (!victim_name)
+ return -ENOMEM;
read_extent_buffer(leaf, victim_name,
(unsigned long)(victim_ref + 1),
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
victim_name_len);
- BUG_ON(ret);
- btrfs_run_delayed_items(trans, root);
kfree(victim_name);
+ if (ret)
+ return ret;
+ btrfs_run_delayed_items(trans, root);
*search_done = 1;
goto again;
}
ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
}
- BUG_ON(ret);
/*
* NOTE: we have searched root tree and checked the
goto next;
victim_name = kmalloc(victim_name_len, GFP_NOFS);
+ if (!victim_name)
+ return -ENOMEM;
read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
victim_name_len);
victim_name_len);
btrfs_run_delayed_items(trans, root);
}
- BUG_ON(ret);
iput(victim_parent);
kfree(victim_name);
+ if (ret)
+ return ret;
*search_done = 1;
goto again;
}
kfree(victim_name);
- BUG_ON(ret);
+ if (ret)
+ return ret;
next:
cur_offset += victim_name_len + sizeof(*extref);
}
ref_index, name, namelen, 0);
if (di && !IS_ERR(di)) {
ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
btrfs_release_path(path);
name, namelen, 0);
if (di && !IS_ERR(di)) {
ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
btrfs_release_path(path);
parent_objectid,
ref_index, name, namelen,
&search_done);
- if (ret == 1)
+ if (ret == 1) {
+ ret = 0;
+ goto out;
+ }
+ if (ret)
goto out;
- BUG_ON(ret);
}
/* insert our name */
ret = btrfs_add_link(trans, dir, inode, name, namelen,
0, ref_index);
- BUG_ON(ret);
+ if (ret)
+ goto out;
btrfs_update_inode(trans, root, inode);
}
/* finally write the back reference in the inode */
ret = overwrite_item(trans, root, path, eb, slot, key);
- BUG_ON(ret);
-
out:
btrfs_release_path(path);
iput(dir);
iput(inode);
- return 0;
+ return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
if (S_ISDIR(inode->i_mode)) {
ret = replay_dir_deletes(trans, root, NULL, path,
ino, 1);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
ret = insert_orphan_item(trans, root, ino);
- BUG_ON(ret);
}
out:
return -EIO;
ret = fixup_inode_link_count(trans, root, inode);
- BUG_ON(ret);
-
iput(inode);
+ if (ret)
+ goto out;
/*
* fixup on a directory may create new entries,
} else if (ret == -EEXIST) {
ret = 0;
} else {
- BUG();
+ BUG(); /* Logic Error */
}
iput(inode);
struct inode *dir;
u8 log_type;
int exists;
- int ret;
+ int ret = 0;
dir = read_one_inode(root, key->objectid);
if (!dir)
key->offset, name,
name_len, 1);
} else {
- BUG();
+ /* Corruption */
+ ret = -EINVAL;
+ goto out;
}
if (IS_ERR_OR_NULL(dst_di)) {
/* we need a sequence number to insert, so we only
goto out;
ret = drop_one_dir_item(trans, root, path, dir, dst_di);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (key->type == BTRFS_DIR_INDEX_KEY)
goto insert;
btrfs_release_path(path);
kfree(name);
iput(dir);
- return 0;
+ return ret;
insert:
btrfs_release_path(path);
ret = insert_one_name(trans, root, path, key->objectid, key->offset,
name, name_len, log_type, &log_key);
-
- BUG_ON(ret && ret != -ENOENT);
+ if (ret && ret != -ENOENT)
+ goto out;
+ ret = 0;
goto out;
}
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
- BUG_ON(ret);
+ if (ret)
+ return ret;
ptr = (unsigned long)(di + 1);
ptr += name_len;
}
ret = link_to_fixup_dir(trans, root,
path, location.objectid);
- BUG_ON(ret);
+ if (ret) {
+ kfree(name);
+ iput(inode);
+ goto out;
+ }
+
btrfs_inc_nlink(inode);
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
- BUG_ON(ret);
-
- btrfs_run_delayed_items(trans, root);
-
+ if (!ret)
+ btrfs_run_delayed_items(trans, root);
kfree(name);
iput(inode);
+ if (ret)
+ goto out;
/* there might still be more names under this key
* check and repeat if required
ret = check_item_in_log(trans, root, log, path,
log_path, dir,
&found_key);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (found_key.offset == (u64)-1)
break;
dir_key.offset = found_key.offset + 1;
if (S_ISDIR(mode)) {
ret = replay_dir_deletes(wc->trans,
root, log, path, key.objectid, 0);
- BUG_ON(ret);
+ if (ret)
+ break;
}
ret = overwrite_item(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
/* for regular files, make sure corresponding
* orphan item exists. extents past the new EOF
if (S_ISREG(mode)) {
ret = insert_orphan_item(wc->trans, root,
key.objectid);
- BUG_ON(ret);
+ if (ret)
+ break;
}
ret = link_to_fixup_dir(wc->trans, root,
path, key.objectid);
- BUG_ON(ret);
+ if (ret)
+ break;
}
if (wc->stage < LOG_WALK_REPLAY_ALL)
continue;
if (key.type == BTRFS_XATTR_ITEM_KEY) {
ret = overwrite_item(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
} else if (key.type == BTRFS_INODE_REF_KEY) {
ret = add_inode_ref(wc->trans, root, log, path,
eb, i, &key);
- BUG_ON(ret && ret != -ENOENT);
+ if (ret && ret != -ENOENT)
+ break;
+ ret = 0;
} else if (key.type == BTRFS_INODE_EXTREF_KEY) {
ret = add_inode_ref(wc->trans, root, log, path,
eb, i, &key);
- BUG_ON(ret && ret != -ENOENT);
+ if (ret && ret != -ENOENT)
+ break;
+ ret = 0;
} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
ret = replay_one_extent(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
} else if (key.type == BTRFS_DIR_ITEM_KEY ||
key.type == BTRFS_DIR_INDEX_KEY) {
ret = replay_one_dir_item(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
}
}
btrfs_free_path(path);
- return 0;
+ return ret;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (*level == 1) {
ret = wc->process_func(root, next, wc, ptr_gen);
- if (ret)
+ if (ret) {
+ free_extent_buffer(next);
return ret;
+ }
path->slots[*level]++;
if (wc->free) {
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
bytenr, blocksize);
- BUG_ON(ret); /* -ENOMEM or logic errors */
+ if (ret) {
+ free_extent_buffer(next);
+ return ret;
+ }
}
free_extent_buffer(next);
continue;
ret = btrfs_free_and_pin_reserved_extent(root,
path->nodes[*level]->start,
path->nodes[*level]->len);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = NULL;
int wret;
int level;
struct btrfs_path *path;
- int i;
int orig_level;
path = btrfs_alloc_path();
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(log, next->start,
next->len);
- BUG_ON(ret); /* -ENOMEM or logic errors */
+ if (ret)
+ goto out;
}
}
out:
- for (i = 0; i <= orig_level; i++) {
- if (path->nodes[i]) {
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- }
- }
btrfs_free_path(path);
return ret;
}
if (trans) {
ret = walk_log_tree(trans, log, &wc);
- BUG_ON(ret);
+
+ /* I don't think this can happen but just in case */
+ if (ret)
+ btrfs_abort_transaction(trans, log, ret);
}
while (1) {
if (di) {
ret = btrfs_delete_one_dir_name(trans, log, path, di);
bytes_del += name_len;
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ goto fail;
+ }
}
btrfs_release_path(path);
di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
if (di) {
ret = btrfs_delete_one_dir_name(trans, log, path, di);
bytes_del += name_len;
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ goto fail;
+ }
}
/* update the directory size in the log to reflect the names
while (1) {
ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Logic error */
if (ret < 0)
break;
log->fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
&ordered_sums, 0);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_release_path(dst_path);
+ kfree(ins_data);
+ return ret;
+ }
}
}
}
return 0;
}
-static int drop_adjacent_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- struct extent_map *em,
- struct btrfs_path *path)
-{
- struct btrfs_file_extent_item *fi;
- struct extent_buffer *leaf;
- struct btrfs_key key, new_key;
- struct btrfs_map_token token;
- u64 extent_end;
- u64 extent_offset = 0;
- int extent_type;
- int del_slot = 0;
- int del_nr = 0;
- int ret = 0;
-
- while (1) {
- btrfs_init_map_token(&token);
- leaf = path->nodes[0];
- path->slots[0]++;
- if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- if (del_nr) {
- ret = btrfs_del_items(trans, root, path,
- del_slot, del_nr);
- if (ret)
- return ret;
- del_nr = 0;
- }
-
- ret = btrfs_next_leaf_write(trans, root, path, 1);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return 0;
- leaf = path->nodes[0];
- }
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != btrfs_ino(inode) ||
- key.type != BTRFS_EXTENT_DATA_KEY ||
- key.offset >= em->start + em->len)
- break;
-
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- extent_type = btrfs_token_file_extent_type(leaf, fi, &token);
- if (extent_type == BTRFS_FILE_EXTENT_REG ||
- extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
- extent_offset = btrfs_token_file_extent_offset(leaf,
- fi, &token);
- extent_end = key.offset +
- btrfs_token_file_extent_num_bytes(leaf, fi,
- &token);
- } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- extent_end = key.offset +
- btrfs_file_extent_inline_len(leaf, fi);
- } else {
- BUG();
- }
-
- if (extent_end <= em->len + em->start) {
- if (!del_nr) {
- del_slot = path->slots[0];
- }
- del_nr++;
- continue;
- }
-
- /*
- * Ok so we'll ignore previous items if we log a new extent,
- * which can lead to overlapping extents, so if we have an
- * existing extent we want to adjust we _have_ to check the next
- * guy to make sure we even need this extent anymore, this keeps
- * us from panicing in set_item_key_safe.
- */
- if (path->slots[0] < btrfs_header_nritems(leaf) - 1) {
- struct btrfs_key tmp_key;
-
- btrfs_item_key_to_cpu(leaf, &tmp_key,
- path->slots[0] + 1);
- if (tmp_key.objectid == btrfs_ino(inode) &&
- tmp_key.type == BTRFS_EXTENT_DATA_KEY &&
- tmp_key.offset <= em->start + em->len) {
- if (!del_nr)
- del_slot = path->slots[0];
- del_nr++;
- continue;
- }
- }
-
- BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
- memcpy(&new_key, &key, sizeof(new_key));
- new_key.offset = em->start + em->len;
- btrfs_set_item_key_safe(trans, root, path, &new_key);
- extent_offset += em->start + em->len - key.offset;
- btrfs_set_token_file_extent_offset(leaf, fi, extent_offset,
- &token);
- btrfs_set_token_file_extent_num_bytes(leaf, fi, extent_end -
- (em->start + em->len),
- &token);
- btrfs_mark_buffer_dirty(leaf);
- }
-
- if (del_nr)
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
-
- return ret;
-}
-
static int log_one_extent(struct btrfs_trans_handle *trans,
struct inode *inode, struct btrfs_root *root,
struct extent_map *em, struct btrfs_path *path)
int index = log->log_transid % 2;
bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
-insert:
+ ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
+ em->start + em->len, NULL, 0);
+ if (ret)
+ return ret;
+
INIT_LIST_HEAD(&ordered_sums);
btrfs_init_map_token(&token);
key.objectid = btrfs_ino(inode);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = em->start;
- path->really_keep_locks = 1;
ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
- if (ret && ret != -EEXIST) {
- path->really_keep_locks = 0;
+ if (ret)
return ret;
- }
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- /*
- * If we are overwriting an inline extent with a real one then we need
- * to just delete the inline extent as it may not be large enough to
- * have the entire file_extent_item.
- */
- if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
- BTRFS_FILE_EXTENT_INLINE) {
- ret = btrfs_del_item(trans, log, path);
- btrfs_release_path(path);
- if (ret) {
- path->really_keep_locks = 0;
- return ret;
- }
- goto insert;
- }
-
btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
&token);
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
em->start - em->orig_start,
&token);
btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
- btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->len, &token);
+ btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
&token);
btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
btrfs_mark_buffer_dirty(leaf);
- /*
- * Have to check the extent to the right of us to make sure it doesn't
- * fall in our current range. We're ok if the previous extent is in our
- * range since the recovery stuff will run us in key order and thus just
- * drop the part we overwrote.
- */
- ret = drop_adjacent_extents(trans, log, inode, em, path);
btrfs_release_path(path);
- path->really_keep_locks = 0;
if (ret) {
return ret;
}
bool fast_search = false;
u64 ino = btrfs_ino(inode);
- log = root->log_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
* only logging is done of any parent directories that are older than
* the last committed transaction
*/
-int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- struct dentry *parent, int exists_only)
+static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ struct dentry *parent, int exists_only)
{
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
if (IS_ERR(wc.replay_dest)) {
ret = PTR_ERR(wc.replay_dest);
+ free_extent_buffer(log->node);
+ free_extent_buffer(log->commit_root);
+ kfree(log);
btrfs_error(fs_info, ret, "Couldn't read target root "
"for tree log recovery.");
goto error;
wc.replay_dest->log_root = log;
btrfs_record_root_in_trans(trans, wc.replay_dest);
ret = walk_log_tree(trans, log, &wc);
- BUG_ON(ret);
- if (wc.stage == LOG_WALK_REPLAY_ALL) {
+ if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
ret = fixup_inode_link_counts(trans, wc.replay_dest,
path);
- BUG_ON(ret);
}
key.offset = found_key.offset - 1;
free_extent_buffer(log->commit_root);
kfree(log);
+ if (ret)
+ goto error;
+
if (found_key.offset == 0)
break;
}
btrfs_free_path(path);
+ /* step 4: commit the transaction, which also unpins the blocks */
+ ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+ if (ret)
+ return ret;
+
free_extent_buffer(log_root_tree->node);
log_root_tree->log_root = NULL;
fs_info->log_root_recovering = 0;
-
- /* step 4: commit the transaction, which also unpins the blocks */
- btrfs_commit_transaction(trans, fs_info->tree_root);
-
kfree(log_root_tree);
- return 0;
+ return 0;
error:
+ if (wc.trans)
+ btrfs_end_transaction(wc.trans, fs_info->tree_root);
btrfs_free_path(path);
return ret;
}
struct inode *inode, u64 dirid);
void btrfs_end_log_trans(struct btrfs_root *root);
int btrfs_pin_log_trans(struct btrfs_root *root);
-int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- struct dentry *parent, int exists_only);
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
struct inode *dir, struct inode *inode,
int for_rename);
ulist->nnodes = 0;
ulist->nodes = ulist->int_nodes;
ulist->nodes_alloced = ULIST_SIZE;
+ ulist->root = RB_ROOT;
}
EXPORT_SYMBOL(ulist_init);
if (ulist->nodes_alloced > ULIST_SIZE)
kfree(ulist->nodes);
ulist->nodes_alloced = 0; /* in case ulist_fini is called twice */
+ ulist->root = RB_ROOT;
}
EXPORT_SYMBOL(ulist_fini);
}
EXPORT_SYMBOL(ulist_free);
+static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
+{
+ struct rb_node *n = ulist->root.rb_node;
+ struct ulist_node *u = NULL;
+
+ while (n) {
+ u = rb_entry(n, struct ulist_node, rb_node);
+ if (u->val < val)
+ n = n->rb_right;
+ else if (u->val > val)
+ n = n->rb_left;
+ else
+ return u;
+ }
+ return NULL;
+}
+
+static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
+{
+ struct rb_node **p = &ulist->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct ulist_node *cur = NULL;
+
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(parent, struct ulist_node, rb_node);
+
+ if (cur->val < ins->val)
+ p = &(*p)->rb_right;
+ else if (cur->val > ins->val)
+ p = &(*p)->rb_left;
+ else
+ return -EEXIST;
+ }
+ rb_link_node(&ins->rb_node, parent, p);
+ rb_insert_color(&ins->rb_node, &ulist->root);
+ return 0;
+}
+
/**
* ulist_add - add an element to the ulist
* @ulist: ulist to add the element to
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
u64 *old_aux, gfp_t gfp_mask)
{
- int i;
-
- for (i = 0; i < ulist->nnodes; ++i) {
- if (ulist->nodes[i].val == val) {
- if (old_aux)
- *old_aux = ulist->nodes[i].aux;
- return 0;
- }
+ int ret = 0;
+ struct ulist_node *node = NULL;
+ node = ulist_rbtree_search(ulist, val);
+ if (node) {
+ if (old_aux)
+ *old_aux = node->aux;
+ return 0;
}
if (ulist->nnodes >= ulist->nodes_alloced) {
}
ulist->nodes[ulist->nnodes].val = val;
ulist->nodes[ulist->nnodes].aux = aux;
+ ret = ulist_rbtree_insert(ulist, &ulist->nodes[ulist->nnodes]);
+ BUG_ON(ret);
++ulist->nnodes;
return 1;
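
The hunk above swaps ulist_add_merge()'s linear duplicate scan for the new rbtree lookup while leaving the external interface untouched. A minimal caller sketch, assuming only the existing ulist API from fs/btrfs/ulist.h (ulist_alloc/ulist_add/ulist_next/ulist_free); the values used are arbitrary illustrations:

    /* Illustrative only: exercises the unchanged ulist API (fs/btrfs/ulist.h). */
    static int ulist_demo(void)
    {
            struct ulist *seen;
            struct ulist_iterator uiter;
            struct ulist_node *node;

            seen = ulist_alloc(GFP_NOFS);
            if (!seen)
                    return -ENOMEM;

            ulist_add(seen, 256, 0, GFP_NOFS);      /* returns 1: newly inserted */
            ulist_add(seen, 256, 0, GFP_NOFS);      /* returns 0: duplicate found via the rbtree */

            ULIST_ITER_INIT(&uiter);
            while ((node = ulist_next(seen, &uiter)))
                    pr_debug("val %llu aux %llu\n",
                             (unsigned long long)node->val,
                             (unsigned long long)node->aux);

            ulist_free(seen);
            return 0;
    }
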
#ifndef __ULIST__
#define __ULIST__
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
/*
* ulist is a generic data structure to hold a collection of unique u64
* values. The only operations it supports is adding to the list and
struct ulist_node {
u64 val; /* value to store */
u64 aux; /* auxiliary value saved along with the val */
+ struct rb_node rb_node; /* used to speed up search */
};
struct ulist {
*/
struct ulist_node *nodes;
+ struct rb_root root;
+
/*
* inline storage space for the first ULIST_SIZE entries
*/
struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
+static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static DEFINE_MUTEX(uuid_mutex);
if (!device->name)
continue;
- ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
- &bdev, &bh);
- if (ret)
+ /* Just open everything we can; ignore failures here */
+ if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
+ &bdev, &bh))
continue;
disk_super = (struct btrfs_super_block *)bh->b_data;
return ret;
}
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device,
- u64 chunk_tree, u64 chunk_objectid,
- u64 chunk_offset, u64 start, u64 num_bytes)
+static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_device *device,
+ u64 chunk_tree, u64 chunk_objectid,
+ u64 chunk_offset, u64 start, u64 num_bytes)
{
int ret;
struct btrfs_path *path;
* the device information is stored in the chunk root
* the btrfs_device struct should be fully filled in
*/
-int btrfs_add_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_device *device)
+static int btrfs_add_device(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_device *device)
{
int ret;
struct btrfs_path *path;
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}
-int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
- struct btrfs_device **device)
+static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+ struct btrfs_device **device)
{
int ret = 0;
struct btrfs_super_block *disk_super;
return 0;
}
-struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
+static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = {
.sub_stripes = 2,
.dev_stripes = 1,
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
- u64 features;
-
if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
return;
- features = btrfs_super_incompat_flags(info->super_copy);
- if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
- return;
-
- features |= BTRFS_FEATURE_INCOMPAT_RAID56;
- btrfs_set_super_incompat_flags(info->super_copy, features);
- printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
+ btrfs_set_fs_incompat(info, RAID56);
}
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
em_tree = &extent_root->fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em);
+ ret = add_extent_mapping(em_tree, em, 0);
write_unlock(&em_tree->lock);
if (ret) {
free_extent_map(em);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, len);
read_unlock(&em_tree->lock);
- BUG_ON(!em);
- BUG_ON(em->start > logical || em->start + em->len < logical);
+ /*
+ * We could return errors for these cases, but that could get ugly and
+ * we'd probably do the same thing, which is just to not do anything else
+ * and exit, so return 1 so the callers don't try to use other copies.
+ */
+ if (!em) {
+ btrfs_emerg(fs_info, "No mapping for %Lu-%Lu\n", logical,
+ logical+len);
+ return 1;
+ }
+
+ if (em->start > logical || em->start + em->len < logical) {
+ btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got "
+ "%Lu-%Lu\n", logical, logical+len, em->start,
+ em->start + em->len);
+ return 1;
+ }
+
map = (struct map_lookup *)em->bdev;
if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
ret = map->num_stripes;
read_unlock(&em_tree->lock);
if (!em) {
- printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
- (unsigned long long)logical,
- (unsigned long long)*length);
- BUG();
+ btrfs_crit(fs_info, "unable to find logical %llu len %llu",
+ (unsigned long long)logical,
+ (unsigned long long)*length);
+ return -EINVAL;
+ }
+
+ if (em->start > logical || em->start + em->len < logical) {
+ btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
+ "found %Lu-%Lu\n", logical, em->start,
+ em->start + em->len);
+ return -EINVAL;
}
- BUG_ON(em->start > logical || em->start + em->len < logical);
map = (struct map_lookup *)em->bdev;
offset = logical - em->start;
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-noinline void btrfs_schedule_bio(struct btrfs_root *root,
- struct btrfs_device *device,
- int rw, struct bio *bio)
+static noinline void btrfs_schedule_bio(struct btrfs_root *root,
+ struct btrfs_device *device,
+ int rw, struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
}
if (map_length < length) {
- printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
- "len %llu\n", (unsigned long long)logical,
- (unsigned long long)length,
- (unsigned long long)map_length);
+ btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
+ (unsigned long long)logical,
+ (unsigned long long)length,
+ (unsigned long long)map_length);
BUG();
}
}
write_lock(&map_tree->map_tree.lock);
- ret = add_extent_mapping(&map_tree->map_tree, em);
+ ret = add_extent_mapping(&map_tree->map_tree, em, 0);
write_unlock(&map_tree->map_tree.lock);
BUG_ON(ret); /* Tree corruption */
free_extent_map(em);
return -EIO;
if (!device) {
- printk(KERN_WARNING "warning devid %llu missing\n",
- (unsigned long long)devid);
+ btrfs_warn(root->fs_info, "devid %llu missing",
+ (unsigned long long)devid);
device = add_missing_dev(root, devid, dev_uuid);
if (!device)
return -ENOMEM;
btrfs_dev_stat_print_on_error(dev);
}
-void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
+static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
if (!dev->dev_stats_valid)
return;
#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \
(sizeof(struct btrfs_bio_stripe) * (n)))
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
- struct btrfs_device *device,
- u64 chunk_tree, u64 chunk_objectid,
- u64 chunk_offset, u64 start, u64 num_bytes);
int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
char *device_path,
struct btrfs_device **device);
-int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
- struct btrfs_device **device);
-int btrfs_add_device(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_root *root, char *device_path);
void btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
-void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_root *root,
struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev);
int btrfs_scratch_superblock(struct btrfs_device *device);
-void btrfs_schedule_bio(struct btrfs_root *root,
- struct btrfs_device *device,
- int rw, struct bio *bio);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
u64 logical, u64 len, int mirror_num);
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
XATTR_REPLACE);
}
-int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
- void *fs_info)
+static int btrfs_initxattrs(struct inode *inode,
+ const struct xattr *xattr_array, void *fs_info)
{
const struct xattr *xattr;
struct btrfs_trans_handle *trans = fs_info;
struct inode *ecryptfs_inode)
{
struct file *lower_file;
- mm_segment_t fs_save;
- ssize_t rc;
-
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
- fs_save = get_fs();
- set_fs(get_ds());
- rc = vfs_read(lower_file, data, size, &offset);
- set_fs(fs_save);
- return rc;
+ return kernel_read(lower_file, offset, data, size);
}
/**
const nfs4_stateid *);
int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
+ int (*free_lock_state)(struct nfs_server *,
+ struct nfs4_lock_state *);
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
const struct nfs4_state_maintenance_ops *state_renewal_ops;
extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
-extern int nfs4_release_lockowner(struct nfs4_lock_state *);
extern const struct xattr_handler *nfs4_xattr_handlers[];
extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_open_context *ctx,
struct list_head ds_addrs;
struct nfs_client *ds_clp;
atomic_t ds_count;
+ unsigned long ds_state;
+#define NFS4DS_CONNECTING 0 /* ds is establishing connection */
};
struct nfs4_file_layout_dsaddr {
return flseg->fh_array[i];
}
+static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
+{
+ might_sleep();
+ wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+}
+
+static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
+}
+
+
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
filelayout_mark_devid_invalid(devid);
return NULL;
}
+ if (ds->ds_clp)
+ return ds;
- if (!ds->ds_clp) {
+ if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
int err;
err = nfs4_ds_connect(s, ds);
if (err) {
nfs4_mark_deviceid_unavailable(devid);
- return NULL;
+ ds = NULL;
}
+ nfs4_clear_ds_conn_bit(ds);
+ } else {
+ /* Either ds is connected, or ds is NULL */
+ nfs4_wait_ds_connect(ds);
}
return ds;
}
if (status != 0)
goto out;
/* Is this a delegated lock? */
- if (test_bit(NFS_DELEGATED_STATE, &state->flags))
- goto out;
lsp = request->fl_u.nfs4_fl.owner;
+ if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
+ goto out;
seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
status = -ENOMEM;
if (seqid == NULL)
.rpc_release = nfs4_release_lockowner_release,
};
-int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
+static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
- struct nfs_server *server = lsp->ls_state->owner->so_server;
struct nfs_release_lockowner_data *data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
return err;
}
-static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
-{
- struct nfs41_free_stateid_args args = {
- .stateid = stateid,
- };
+struct nfs_free_stateid_data {
+ struct nfs_server *server;
+ struct nfs41_free_stateid_args args;
struct nfs41_free_stateid_res res;
+};
+
+static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_free_stateid_data *data = calldata;
+ nfs41_setup_sequence(nfs4_get_session(data->server),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task);
+}
+
+static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs_free_stateid_data *data = calldata;
+
+ nfs41_sequence_done(task, &data->res.seq_res);
+
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ if (nfs4_async_handle_error(task, data->server, NULL) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+ }
+}
+
+static void nfs41_free_stateid_release(void *calldata)
+{
+ kfree(calldata);
+}
+
+const struct rpc_call_ops nfs41_free_stateid_ops = {
+ .rpc_call_prepare = nfs41_free_stateid_prepare,
+ .rpc_call_done = nfs41_free_stateid_done,
+ .rpc_release = nfs41_free_stateid_release,
+};
+
+static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
+ nfs4_stateid *stateid,
+ bool privileged)
+{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
- .rpc_argp = &args,
- .rpc_resp = &res,
};
- int status;
+ struct rpc_task_setup task_setup = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs41_free_stateid_ops,
+ .flags = RPC_TASK_ASYNC,
+ };
+ struct nfs_free_stateid_data *data;
dprintk("NFS call free_stateid %p\n", stateid);
- nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
- nfs4_set_sequence_privileged(&args.seq_args);
- status = nfs4_call_sync_sequence(server->client, server, &msg,
- &args.seq_args, &res.seq_res);
- dprintk("NFS reply free_stateid: %d\n", status);
- return status;
+ data = kmalloc(sizeof(*data), GFP_NOFS);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+ data->server = server;
+ nfs4_stateid_copy(&data->args.stateid, stateid);
+
+ task_setup.callback_data = data;
+
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
+ if (privileged)
+ nfs4_set_sequence_privileged(&data->args.seq_args);
+
+ return rpc_run_task(&task_setup);
}
/**
*/
static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
- struct nfs4_exception exception = { };
- int err;
- do {
- err = _nfs4_free_stateid(server, stateid);
- if (err != -NFS4ERR_DELAY)
- break;
- nfs4_handle_exception(server, err, &exception);
- } while (exception.retry);
- return err;
+ struct rpc_task *task;
+ int ret;
+
+ task = _nfs41_free_stateid(server, stateid, true);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ ret = rpc_wait_for_completion_task(task);
+ if (!ret)
+ ret = task->tk_status;
+ rpc_put_task(task);
+ return ret;
+}
+
+static int nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
+{
+ struct rpc_task *task;
+
+ task = _nfs41_free_stateid(server, &lsp->ls_stateid, false);
+ nfs4_free_lock_state(server, lsp);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
}
static bool nfs41_match_stateid(const nfs4_stateid *s1,
.call_sync = _nfs4_call_sync,
.match_stateid = nfs4_match_stateid,
.find_root_sec = nfs4_find_root_sec,
+ .free_lock_state = nfs4_release_lockowner,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
.state_renewal_ops = &nfs40_state_renewal_ops,
.call_sync = nfs4_call_sync_sequence,
.match_stateid = nfs41_match_stateid,
.find_root_sec = nfs41_find_root_sec,
+ .free_lock_state = nfs41_free_lock_state,
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
.state_renewal_ops = &nfs41_state_renewal_ops,
*/
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
+ struct nfs_server *server;
struct nfs4_state *state;
if (lsp == NULL)
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
spin_unlock(&state->state_lock);
+ server = state->owner->so_server;
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
- if (nfs4_release_lockowner(lsp) == 0)
- return;
- }
- nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp);
+ struct nfs_client *clp = server->nfs_client;
+
+ clp->cl_mvops->free_lock_state(server, lsp);
+ } else
+ nfs4_free_lock_state(server, lsp);
}
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
- encode_nfs4_stateid(xdr, args->stateid);
+ encode_nfs4_stateid(xdr, &args->stateid);
}
#endif /* CONFIG_NFS_V4_1 */
/*
* Select a security flavor for this mount. The selected flavor
* is planted in args->auth_flavors[0].
+ *
+ * Returns 0 on success, -EACCES on failure.
*/
-static void nfs_select_flavor(struct nfs_parsed_mount_data *args,
+static int nfs_select_flavor(struct nfs_parsed_mount_data *args,
struct nfs_mount_request *request)
{
unsigned int i, count = *(request->auth_flav_len);
rpc_authflavor_t flavor;
- if (args->auth_flavors[0] != RPC_AUTH_MAXFLAVOR)
- goto out;
-
/*
* The NFSv2 MNT operation does not return a flavor list.
*/
if (count == 0)
goto out_default;
+ /*
+ * If the sec= mount option is used, the specified flavor or AUTH_NULL
+ * must be in the list returned by the server.
+ *
+ * AUTH_NULL has a special meaning when it's in the server list - it
+ * means that the server will ignore the rpc creds, so any flavor
+ * can be used.
+ */
+ if (args->auth_flavors[0] != RPC_AUTH_MAXFLAVOR) {
+ for (i = 0; i < count; i++) {
+ if (args->auth_flavors[0] == request->auth_flavs[i] ||
+ request->auth_flavs[i] == RPC_AUTH_NULL)
+ goto out;
+ }
+ dfprintk(MOUNT, "NFS: auth flavor %d not supported by server\n",
+ args->auth_flavors[0]);
+ goto out_err;
+ }
+
/*
* RFC 2623, section 2.7 suggests we SHOULD prefer the
* flavor listed first. However, some servers list
}
}
+ /*
+ * As a last chance, see if the server list contains AUTH_NULL -
+ * if it does, use the default flavor.
+ */
+ for (i = 0; i < count; i++) {
+ if (request->auth_flavs[i] == RPC_AUTH_NULL)
+ goto out_default;
+ }
+
+ dfprintk(MOUNT, "NFS: no auth flavors in common with server\n");
+ goto out_err;
+
out_default:
- flavor = RPC_AUTH_UNIX;
+ /* use default if flavor not already set */
+ flavor = (args->auth_flavors[0] == RPC_AUTH_MAXFLAVOR) ?
+ RPC_AUTH_UNIX : args->auth_flavors[0];
out_set:
args->auth_flavors[0] = flavor;
out:
dfprintk(MOUNT, "NFS: using auth flavor %d\n", args->auth_flavors[0]);
+ return 0;
+out_err:
+ return -EACCES;
}
/*
return status;
}
- nfs_select_flavor(args, &request);
- return 0;
+ return nfs_select_flavor(args, &request);
}
struct dentry *nfs_try_mount(int flags, const char *dev_name,
pr_err("memory size too small, minimum is %zu\n",
cxt->console_size + cxt->record_size +
cxt->ftrace_size);
+ err = -EINVAL;
goto fail_cnt;
}
spin_lock_init(&cxt->pstore.buf_lock);
if (!cxt->pstore.buf) {
pr_err("cannot allocate pstore buffer\n");
+ err = -ENOMEM;
goto fail_clear;
}
If unsure, say N.
+config XFS_WARN
+ bool "XFS Verbose Warnings"
+ depends on XFS_FS && !XFS_DEBUG
+ help
+ Say Y here to get an XFS build with many additional warnings.
+ It converts ASSERT checks to WARN, so it will log any out-of-bounds
+ conditions that occur but would otherwise be missed. It is much
+ lighter weight than XFS_DEBUG, does not modify algorithms, and will
+ not cause the kernel to panic on non-fatal errors.
+
+ However, similar to XFS_DEBUG, it is only advisable to use this if you
+ are debugging a particular problem.
+
config XFS_DEBUG
bool "XFS Debugging support"
depends on XFS_FS
typedef struct {
struct rw_semaphore mr_lock;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
int mr_writer;
#endif
} mrlock_t;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
#define mrinit(mrp, name) \
do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
#else
static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
{
down_write_nested(&mrp->mr_lock, subclass);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 1;
#endif
}
{
if (!down_write_trylock(&mrp->mr_lock))
return 0;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 1;
#endif
return 1;
static inline void mrunlock_excl(mrlock_t *mrp)
{
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 0;
#endif
up_write(&mrp->mr_lock);
static inline void mrdemote(mrlock_t *mrp)
{
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
mrp->mr_writer = 0;
#endif
downgrade_write(&mrp->mr_lock);
#define XFS_BUF_LOCK_TRACKING 1
#endif
+#ifdef CONFIG_XFS_WARN
+#define XFS_WARN 1
+#endif
+
+
#include "xfs_linux.h"
#endif /* __XFS_H__ */
};
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_allocbt_keys_inorder(
struct xfs_btree_cur *cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_allocbt_key_diff,
.buf_ops = &xfs_allocbt_buf_ops,
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
.keys_inorder = xfs_allocbt_keys_inorder,
.recs_inorder = xfs_allocbt_recs_inorder,
#endif
};
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_bmbt_keys_inorder(
struct xfs_btree_cur *cur,
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
.key_diff = xfs_bmbt_key_diff,
.buf_ops = &xfs_bmbt_buf_ops,
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
#endif
const struct xfs_buf_ops *buf_ops;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
/* check that k1 is lower than k2 */
int (*keys_inorder)(struct xfs_btree_cur *cur,
union xfs_btree_key *k1,
xfs_dir2_leaf_t *leaf1; /* first leaf structure */
xfs_dir2_leaf_t *leaf2; /* second leaf structure */
int mid; /* midpoint leaf index */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
int oldstale; /* old count of stale leaves */
#endif
int oldsum; /* old total leaf count */
ents2 = xfs_dir3_leaf_ents_p(leaf2);
oldsum = hdr1.count + hdr2.count;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
oldstale = hdr1.stale + hdr2.stale;
#endif
mid = oldsum >> 1;
.verify_write = xfs_inobt_write_verify,
};
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_inobt_keys_inorder(
struct xfs_btree_cur *cur,
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
.buf_ops = &xfs_inobt_buf_ops,
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
#endif
trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
xfs_inode_t *ip,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
- if (!kbuf)
- goto out_dput;
+ kbuf = kmem_zalloc(al_hreq.buflen, KM_SLEEP | KM_MAYFAIL);
+ if (!kbuf) {
+ kbuf = kmem_zalloc_large(al_hreq.buflen);
+ if (!kbuf)
+ goto out_dput;
+ }
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
error = -EFAULT;
out_kfree:
- kfree(kbuf);
+ if (is_vmalloc_addr(kbuf))
+ kmem_free_large(kbuf);
+ else
+ kmem_free(kbuf);
out_dput:
dput(dentry);
return error;
return PTR_ERR(dentry);
error = -ENOMEM;
- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
- if (!kbuf)
- goto out_dput;
+ kbuf = kmem_zalloc(al_hreq.buflen, KM_SLEEP | KM_MAYFAIL);
+ if (!kbuf) {
+ kbuf = kmem_zalloc_large(al_hreq.buflen);
+ if (!kbuf)
+ goto out_dput;
+ }
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
error = -EFAULT;
out_kfree:
- kfree(kbuf);
+ if (is_vmalloc_addr(kbuf))
+ kmem_free_large(kbuf);
+ else
+ kmem_free(kbuf);
out_dput:
dput(dentry);
return error;
#define ASSERT_ALWAYS(expr) \
(unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-#ifndef DEBUG
-#define ASSERT(expr) ((void)0)
+#ifdef DEBUG
+#define ASSERT(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef STATIC
-# define STATIC static noinline
+# define STATIC noinline
#endif
-#else /* DEBUG */
+#else /* !DEBUG */
+
+#ifdef XFS_WARN
#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
#ifndef STATIC
-# define STATIC noinline
+# define STATIC static noinline
+#endif
+
+#else /* !DEBUG && !XFS_WARN */
+
+#define ASSERT(expr) ((void)0)
+
+#ifndef STATIC
+# define STATIC static noinline
#endif
+#endif /* XFS_WARN */
#endif /* DEBUG */
#endif /* __XFS_LINUX__ */
BUG_ON(do_panic);
}
+void
+asswarn(char *expr, char *file, int line)
+{
+ xfs_warn(NULL, "Assertion failed: %s, file: %s, line: %d",
+ expr, file, line);
+ WARN_ON(1);
+}
+
void
assfail(char *expr, char *file, int line)
{
xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
extern void assfail(char *expr, char *f, int l);
+extern void asswarn(char *expr, char *f, int l);
extern void xfs_hex_dump(void *p, int length);
int64_t t_res_fdblocks_delta; /* on-disk only chg */
int64_t t_frextents_delta;/* superblock freextents chg*/
int64_t t_res_frextents_delta; /* on-disk only chg */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
int64_t t_ag_freeblks_delta; /* debugging counter */
int64_t t_ag_flist_delta; /* debugging counter */
int64_t t_ag_btree_delta; /* debugging counter */
#define xfs_trans_get_block_res(tp) ((tp)->t_blk_res)
#define xfs_trans_set_sync(tp) ((tp)->t_flags |= XFS_TRANS_SYNC)
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
#define xfs_trans_agblocks_delta(tp, d) ((tp)->t_ag_freeblks_delta += (int64_t)d)
#define xfs_trans_agflist_delta(tp, d) ((tp)->t_ag_flist_delta += (int64_t)d)
#define xfs_trans_agbtree_delta(tp, d) ((tp)->t_ag_btree_delta += (int64_t)d)
--- /dev/null
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of_dma.h
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_ACPI_DMA_H
+#define __LINUX_ACPI_DMA_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/**
+ * struct acpi_dma_spec - slave device DMA resources
+ * @chan_id: channel unique id
+ * @slave_id: request line unique id
+ * @dev: struct device of the DMA controller to be used in the filter
+ * function
+ */
+struct acpi_dma_spec {
+ int chan_id;
+ int slave_id;
+ struct device *dev;
+};
+
+/**
+ * struct acpi_dma - representation of the registered DMAC
+ * @dma_controllers: linked list node
+ * @dev: struct device of this controller
+ * @acpi_dma_xlate: callback function to find a suitable channel
+ * @data: private data used by a callback function
+ */
+struct acpi_dma {
+ struct list_head dma_controllers;
+ struct device *dev;
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *);
+ void *data;
+};
+
+/* Used with acpi_dma_simple_xlate() */
+struct acpi_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
+
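+
For controllers that can get by with acpi_dma_simple_xlate(), struct acpi_dma_filter_info above is the glue between the xlate call and a classic dmaengine filter function. A hedged registration sketch, assuming acpi_dma_simple_xlate() interprets the registered data pointer as a struct acpi_dma_filter_info; dw_filter and dw_register_acpi_dma are placeholder names, not part of the API:

    /* Placeholder filter: accept any channel offered by the core. */
    static bool dw_filter(struct dma_chan *chan, void *param)
    {
            return true;
    }

    static struct acpi_dma_filter_info dw_filter_info;

    static int dw_register_acpi_dma(struct platform_device *pdev)
    {
            dma_cap_zero(dw_filter_info.dma_cap);
            dma_cap_set(DMA_SLAVE, dw_filter_info.dma_cap);
            dw_filter_info.filter_fn = dw_filter;

            /* assumption: acpi_dma_simple_xlate() reads 'data' as filter_info */
            return devm_acpi_dma_controller_register(&pdev->dev,
                                                     acpi_dma_simple_xlate,
                                                     &dw_filter_info);
    }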
+#ifdef CONFIG_DMA_ACPI
+
+int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+int acpi_dma_controller_free(struct device *dev);
+int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+void devm_acpi_dma_controller_free(struct device *dev);
+
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+ size_t index);
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+ const char *name);
+
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma);
+#else
+
+static inline int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline int acpi_dma_controller_free(struct device *dev)
+{
+ return -ENODEV;
+}
+static inline int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline void devm_acpi_dma_controller_free(struct device *dev)
+{
+}
+
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
+ struct device *dev, size_t index)
+{
+ return NULL;
+}
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
+ struct device *dev, const char *name)
+{
+ return NULL;
+}
+
+#define acpi_dma_simple_xlate NULL
+
+#endif
+
+#define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index
+
+#endif /* __LINUX_ACPI_DMA_H */
#define __CPU_COOLING_H__
#include <linux/thermal.h>
+#include <linux/cpumask.h>
-#define CPUFREQ_COOLING_START 0
-#define CPUFREQ_COOLING_STOP 1
-
-#if defined(CONFIG_CPU_THERMAL) || defined(CONFIG_CPU_THERMAL_MODULE)
+#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
*/
-struct thermal_cooling_device *cpufreq_cooling_register(
- const struct cpumask *clip_cpus);
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int);
#else /* !CONFIG_CPU_THERMAL */
-static inline struct thermal_cooling_device *cpufreq_cooling_register(
- const struct cpumask *clip_cpus)
+static inline struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
return NULL;
}
-static inline void cpufreq_cooling_unregister(
- struct thermal_cooling_device *cdev)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
return;
}
+static inline
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
#endif /* CONFIG_CPU_THERMAL */
#endif /* __CPU_COOLING_H__ */
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
static inline void dma_issue_pending_all(void)
{
}
-static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param)
{
return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- char *name)
+ const char *name)
{
return NULL;
}
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
static inline struct dma_chan
-*__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn,
- void *fn_param, struct device *dev,
- char *name)
+*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device *dev, char *name)
{
struct dma_chan *chan;
const char *label;
};
-#ifdef CONFIG_GENERIC_GPIO
+#ifdef CONFIG_GPIOLIB
#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
#include <asm/gpio.h>
#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
-#else /* ! CONFIG_GENERIC_GPIO */
+#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
#include <linux/types.h>
WARN_ON(1);
}
-#endif /* ! CONFIG_GENERIC_GPIO */
+#endif /* ! CONFIG_GPIOLIB */
struct device;
struct mtd_part_parser_data;
extern int mtd_device_parse_register(struct mtd_info *mtd,
- const char **part_probe_types,
- struct mtd_part_parser_data *parser_data,
- const struct mtd_partition *defparts,
- int defnr_parts);
+ const char * const *part_probe_types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *defparts,
+ int defnr_parts);
#define mtd_device_register(master, parts, nr_parts) \
mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
#define NAND_CMD_READOOB 0x50
#define NAND_CMD_ERASE1 0x60
#define NAND_CMD_STATUS 0x70
-#define NAND_CMD_STATUS_MULTI 0x71
#define NAND_CMD_SEQIN 0x80
#define NAND_CMD_RNDIN 0x85
#define NAND_CMD_READID 0x90
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
-/* Extended commands for AG-AND device */
-/*
- * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but
- * there is no way to distinguish that from NAND_CMD_READ0
- * until the remaining sequence of commands has been completed
- * so add a high order bit and mask it off in the command.
- */
-#define NAND_CMD_DEPLETE1 0x100
-#define NAND_CMD_DEPLETE2 0x38
-#define NAND_CMD_STATUS_MULTI 0x71
-#define NAND_CMD_STATUS_ERROR 0x72
-/* multi-bank error status (banks 0-3) */
-#define NAND_CMD_STATUS_ERROR0 0x73
-#define NAND_CMD_STATUS_ERROR1 0x74
-#define NAND_CMD_STATUS_ERROR2 0x75
-#define NAND_CMD_STATUS_ERROR3 0x76
-#define NAND_CMD_STATUS_RESET 0x7f
-#define NAND_CMD_STATUS_CLEAR 0xff
-
#define NAND_CMD_NONE -1
/* Status bits */
*/
/* Buswidth is 16 bit */
#define NAND_BUSWIDTH_16 0x00000002
-/* Device supports partial programming without padding */
-#define NAND_NO_PADDING 0x00000004
/* Chip has cache program function */
#define NAND_CACHEPRG 0x00000008
-/* Chip has copy back function */
-#define NAND_COPYBACK 0x00000010
-/*
- * AND Chip which has 4 banks and a confusing page / block
- * assignment. See Renesas datasheet for further information.
- */
-#define NAND_IS_AND 0x00000020
-/*
- * Chip has a array of 4 pages which can be read without
- * additional ready /busy waits.
- */
-#define NAND_4PAGE_ARRAY 0x00000040
-/*
- * Chip requires that BBT is periodically rewritten to prevent
- * bits from adjacent blocks from 'leaking' in altering data.
- * This happens with the Renesas AG-AND chips, possibly others.
- */
-#define BBT_AUTO_REFRESH 0x00000080
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
#define NAND_SUBPAGE_READ 0x00001000
/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS \
- (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
/* Macros to identify the above */
-#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/* Non chip related options */
* any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
* @read_subpage: function to read parts of the page covered by ECC;
* returns same as read_page()
+ * @write_subpage: function to write parts of the page covered by ECC.
* @write_page: function to write a page according to the ECC generator
* requirements.
* @write_oob_raw: function to write chip OOB data without ECC
uint8_t *buf, int oob_required, int page);
int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf);
+ int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *data_buf, int oob_required);
int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required);
int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
int status, int page);
int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw);
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
#define NAND_MFR_MACRONIX 0xc2
#define NAND_MFR_EON 0x92
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have a 512-byte NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+ .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+ .options = (opts) }
+
/**
* struct nand_flash_dev - NAND Flash Device ID Structure
- * @name: Identify the device type
- * @id: device ID code
- * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
- * If the pagesize is 0, then the real pagesize
- * and the eraseize are determined from the
- * extended id bytes in the chip
- * @erasesize: Size of an erase block in the flash device.
- * @chipsize: Total chipsize in Mega Bytes
- * @options: Bitfield to store chip relevant options
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same
+ * memory address as @id[0])
+ * @dev_id: device ID part of the full chip ID array (refers to the same memory
+ * address as @id[1])
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ * well as the eraseblock size) is determined from the extended NAND
+ * chip ID array
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: The valid length of the @id.
+ * @oobsize: OOB size
*/
struct nand_flash_dev {
char *name;
- int id;
- unsigned long pagesize;
- unsigned long chipsize;
- unsigned long erasesize;
- unsigned long options;
+ union {
+ struct {
+ uint8_t mfr_id;
+ uint8_t dev_id;
+ };
+ uint8_t id[NAND_MAX_ID_LEN];
+ };
+ unsigned int pagesize;
+ unsigned int chipsize;
+ unsigned int erasesize;
+ unsigned int options;
+ uint16_t id_len;
+ uint16_t oobsize;
};
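
With the anonymous union above, a device table entry only needs the fields each helper macro fills in. A hypothetical pair of entries in the style of nand_flash_ids[]; the names, IDs and sizes below are placeholders, not taken from the real table:

    /* Hypothetical entries for illustration only; IDs and sizes are placeholders. */
    static struct nand_flash_dev example_nand_ids[] = {
            /* legacy part: second ID byte fixes geometry, 512-byte pages */
            LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xA2, 64, 16 * 1024, 0),
            /* newer part: page/eraseblock size come from the extended ID bytes */
            EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, NAND_SAMSUNG_LP_OPTIONS),
            {NULL},
    };
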
/**
unsigned int pfow_base;
char *probe_type;
struct mtd_partition *parts;
- const char **part_probe_types;
+ const char * const *part_probe_types;
};
#endif /* __LINUX_MTD_PHYSMAP__ */
struct platdata_mtd_ram {
const char *mapname;
- const char **map_probes;
- const char **probes;
+ const char * const *map_probes;
+ const char * const *probes;
struct mtd_partition *partitions;
int nr_partitions;
int bankwidth;
struct nfs41_free_stateid_args {
struct nfs4_sequence_args seq_args;
- nfs4_stateid *stateid;
+ nfs4_stateid stateid;
};
struct nfs41_free_stateid_res {
__u8 vs[1024];
};
+enum {
+ NVME_CTRL_ONCS_COMPARE = 1 << 0,
+ NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
+ NVME_CTRL_ONCS_DSM = 1 << 2,
+};
+
struct nvme_lbaf {
__le16 ms;
__u8 ds;
__u8 flags;
__u16 command_id;
__le32 nsid;
- __u32 cdw2[2];
+ __le32 cdw2[2];
__le64 metadata;
__le64 prp1;
__le64 prp2;
- __u32 cdw10[6];
+ __le32 cdw10[6];
};
struct nvme_rw_command {
NVME_RW_DSM_COMPRESSED = 1 << 7,
};
+struct nvme_dsm_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[2];
+ __le64 prp1;
+ __le64 prp2;
+ __le32 nr;
+ __le32 attributes;
+ __u32 rsvd12[4];
+};
+
+enum {
+ NVME_DSMGMT_IDR = 1 << 0,
+ NVME_DSMGMT_IDW = 1 << 1,
+ NVME_DSMGMT_AD = 1 << 2,
+};
+
+struct nvme_dsm_range {
+ __le32 cattr;
+ __le32 nlb;
+ __le64 slba;
+};
+
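+
These structures carry Dataset Management requests such as deallocate (TRIM). A rough sketch of filling in a single-range deallocate, assuming nvme_cmd_dsm (0x09) from the driver's opcode enum and a DMA-coherent range buffer; the function name and all parameters are placeholders:

    /* Hedged sketch: 'range'/'range_dma' are a DMA-coherent buffer pair and
     * nvme_cmd_dsm is assumed to be the Dataset Management opcode (0x09). */
    static void fill_discard_cmd(struct nvme_command *c, struct nvme_ns *ns,
                                 struct nvme_dsm_range *range, dma_addr_t range_dma,
                                 u64 start_lba, u32 nblocks, u16 cmdid)
    {
            range->cattr = cpu_to_le32(0);
            range->nlb   = cpu_to_le32(nblocks);    /* length in logical blocks */
            range->slba  = cpu_to_le64(start_lba);  /* starting LBA */

            memset(c, 0, sizeof(*c));
            c->dsm.opcode     = nvme_cmd_dsm;
            c->dsm.command_id = cmdid;
            c->dsm.nsid       = cpu_to_le32(ns->ns_id);
            c->dsm.prp1       = cpu_to_le64(range_dma);
            c->dsm.nr         = cpu_to_le32(0);     /* zero-based count: one range */
            c->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);    /* deallocate */
    }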
/* Admin commands */
enum nvme_admin_opcode {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_SW_PROGRESS = 0x0c,
+ NVME_FWACT_REPL = (0 << 3),
+ NVME_FWACT_REPL_ACTV = (1 << 3),
+ NVME_FWACT_ACTV = (2 << 3),
};
struct nvme_identify {
__u32 rsvd12[4];
};
+struct nvme_format_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __u64 rsvd2[4];
+ __le32 cdw10;
+ __u32 rsvd11[5];
+};
+
struct nvme_command {
union {
struct nvme_common_command common;
struct nvme_create_sq create_sq;
struct nvme_delete_queue delete_queue;
struct nvme_download_firmware dlfw;
+ struct nvme_format_cmd format;
+ struct nvme_dsm_cmd dsm;
};
};
NVME_SC_FUSED_FAIL = 0x9,
NVME_SC_FUSED_MISSING = 0xa,
NVME_SC_INVALID_NS = 0xb,
+ NVME_SC_CMD_SEQ_ERROR = 0xc,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
+#ifdef __KERNEL__
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/kref.h>
+
+#define NVME_IO_TIMEOUT (5 * HZ)
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct list_head node;
+ struct nvme_queue **queues;
+ u32 __iomem *dbs;
+ struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ int instance;
+ int queue_count;
+ int db_stride;
+ u32 ctrl_config;
+ struct msix_entry *entry;
+ struct nvme_bar __iomem *bar;
+ struct list_head namespaces;
+ struct kref kref;
+ struct miscdevice miscdev;
+ char name[12];
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+ u32 max_hw_sectors;
+ u32 stripe_size;
+ u16 oncs;
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_dev *dev;
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ int ns_id;
+ int lba_shift;
+ int ms;
+ u64 mode_select_num_blocks;
+ u32 mode_select_block_len;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries. You can't see it in this data structure because C doesn't let
+ * me express that. Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+ void *private; /* For the use of the submitter of the I/O */
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int offset; /* Of PRP list */
+ int nents; /* Used in scatterlist */
+ int length; /* Of data, in bytes */
+ dma_addr_t first_dma;
+ struct scatterlist sg[0];
+};
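Because the PRP list lives past the flexible sg[] member, any allocator has to size the object for both. The helper below is a hypothetical sketch of that arithmetic; the driver's real nvme_alloc_iod() may compute the PRP space differently:

	static struct nvme_iod *example_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
	{
		/* Illustrative PRP sizing: one 8-byte entry per data page. */
		size_t prp_bytes = 8 * DIV_ROUND_UP(nbytes, PAGE_SIZE);
		struct nvme_iod *iod;

		iod = kmalloc(sizeof(*iod) +
			      nseg * sizeof(struct scatterlist) +	/* backs sg[] */
			      prp_bytes, gfp);
		if (iod) {
			iod->npages = -1;	/* no PRP list pages allocated yet */
			iod->offset = offsetof(struct nvme_iod, sg[nseg]);
			iod->length = nbytes;
			iod->nents = 0;
		}
		return iod;
	}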
+
+static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+{
+ return (sector >> (ns->lba_shift - 9));
+}
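For example, on a namespace formatted with 4 KiB logical blocks (ns->lba_shift == 12) the helper shifts by 12 - 9 = 3, so kernel sector 80 maps to device LBA 10; with 512-byte blocks the shift is zero and the two numberings coincide.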
+
+/**
+ * nvme_free_iod - frees an nvme_iod
+ * @dev: The device that the I/O was submitted to
+ * @iod: The memory to free
+ */
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
+
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+ struct nvme_iod *iod, int total_len, gfp_t gfp);
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length);
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ struct nvme_iod *iod);
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
+void put_nvmeq(struct nvme_queue *nvmeq);
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ u32 *result, unsigned timeout);
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
+int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
+ u32 *result);
+int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr);
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+ dma_addr_t dma_addr, u32 *result);
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+ dma_addr_t dma_addr, u32 *result);
+
+struct sg_io_hdr;
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_get_version_num(int __user *ip);
+
+#endif
+
#endif /* _LINUX_NVME_H */
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *);
void *of_dma_data;
- int use_count;
};
struct of_dma_filter_info {
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
void *data);
-extern int of_dma_controller_free(struct device_node *np);
+extern void of_dma_controller_free(struct device_node *np);
extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
- char *name);
+ const char *name);
extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
#else
return -ENODEV;
}
-static inline int of_dma_controller_free(struct device_node *np)
+static inline void of_dma_controller_free(struct device_node *np)
{
- return -ENODEV;
}
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
- char *name)
+ const char *name)
{
return NULL;
}
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
struct elm_errorvec *err_vec);
-void elm_config(struct device *dev, enum bch_ecc bch_type);
+int elm_config(struct device *dev, enum bch_ecc bch_type);
#endif /* __ELM_H */
+++ /dev/null
-/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-#include <linux/errno.h>
-
-#ifdef CONFIG_IRAM_ALLOC
-
-int __init iram_init(unsigned long base, unsigned long size);
-void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr);
-void iram_free(unsigned long dma_addr, unsigned int size);
-
-#else
-
-static inline int __init iram_init(unsigned long base, unsigned long size)
-{
- return -ENOMEM;
-}
-
-static inline void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr)
-{
- return NULL;
-}
-
-static inline void iram_free(unsigned long base, unsigned long size) {}
-
-#endif
--- /dev/null
+/*
+ * Header for the SUDMAC driver
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef SUDMAC_H
+#define SUDMAC_H
+
+#include <linux/dmaengine.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sudmac_slave {
+ struct shdma_slave shdma_slave; /* Set by the platform */
+};
+
+/*
+ * Supplied by platforms to specify how a DMA channel has to be configured for
+ * a certain peripheral
+ */
+struct sudmac_slave_config {
+ int slave_id;
+};
+
+struct sudmac_channel {
+ unsigned long offset;
+ unsigned long config;
+ unsigned long wait; /* The configurable range is 0 to 3 */
+ unsigned long dint_end_bit;
+};
+
+struct sudmac_pdata {
+ const struct sudmac_slave_config *slave;
+ int slave_num;
+ const struct sudmac_channel *channel;
+ int channel_num;
+};
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_TX_BUFFER_MODE BIT(0)
+#define SUDMAC_RX_END_MODE BIT(1)
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_DMA_BIT_CH0 BIT(0)
+#define SUDMAC_DMA_BIT_CH1 BIT(1)
+
+#endif
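For reference, platform code hands the driver a sudmac_pdata describing its slaves and channels. The fragment below is a hypothetical board-file sketch; the slave IDs, register offsets and mode choices are made up for illustration:

	static const struct sudmac_slave_config example_sudmac_slaves[] = {
		{ .slave_id = 0 },
		{ .slave_id = 1 },
	};

	static const struct sudmac_channel example_sudmac_channels[] = {
		{
			.offset		= 0x0000,
			.config		= SUDMAC_TX_BUFFER_MODE,
			.wait		= 3,		/* valid range is 0..3 */
			.dint_end_bit	= SUDMAC_DMA_BIT_CH0,
		},
		{
			.offset		= 0x0080,
			.config		= SUDMAC_RX_END_MODE,
			.wait		= 3,
			.dint_end_bit	= SUDMAC_DMA_BIT_CH1,
		},
	};

	static struct sudmac_pdata example_sudmac_pdata = {
		.slave		= example_sudmac_slaves,
		.slave_num	= ARRAY_SIZE(example_sudmac_slaves),
		.channel	= example_sudmac_channels,
		.channel_num	= ARRAY_SIZE(example_sudmac_channels),
	};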
#define THERMAL_MAX_TRIPS 12
#define THERMAL_NAME_LENGTH 20
+/* invalid cooling state */
+#define THERMAL_CSTATE_INVALID -1UL
+
/* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT -1UL
+#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
char name[THERMAL_NAME_LENGTH];
int (*throttle)(struct thermal_zone_device *tz, int trip);
struct list_head governor_list;
- struct module *owner;
};
/* Structure that holds binding parameters for a zone */
struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
int get_tz_trend(struct thermal_zone_device *, int);
struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
struct thermal_cooling_device *, int);
void thermal_cdev_update(struct thermal_cooling_device *);
-void notify_thermal_framework(struct thermal_zone_device *, int);
-
-int thermal_register_governor(struct thermal_governor *);
-void thermal_unregister_governor(struct thermal_governor *);
+void thermal_notify_framework(struct thermal_zone_device *, int);
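The two new getters let other subsystems consume thermal data without registering a zone themselves. A minimal consumer sketch follows, assuming the usual conventions that lookup failures come back as ERR_PTR() values and that temperatures are reported in millidegrees Celsius; the zone name "cpu-thermal" is invented for the example:

	static int example_read_cpu_temp(void)
	{
		struct thermal_zone_device *tz;
		unsigned long temp;	/* millidegrees Celsius */
		int ret;

		tz = thermal_zone_get_zone_by_name("cpu-thermal");
		if (IS_ERR(tz))
			return PTR_ERR(tz);

		ret = thermal_zone_get_temp(tz, &temp);
		if (ret)
			return ret;

		pr_info("cpu-thermal: %lu.%03lu C\n", temp / 1000, temp % 1000);
		return 0;
	}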
#ifdef CONFIG_NET
extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
enum events event);
#else
-static int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
enum events event)
{
return 0;
struct sk_buff_head done;
struct sk_buff_head rxq_pause;
struct urb *interrupt;
+ unsigned interrupt_count;
+ struct mutex interrupt_mutex;
struct usb_anchor deferred;
struct tasklet_struct bh;
extern int usbnet_manage_power(struct usbnet *, int);
extern void usbnet_link_change(struct usbnet *, bool, bool);
+extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
+extern void usbnet_status_stop(struct usbnet *dev);
+
#endif /* __LINUX_USB_USBNET_H */
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
-#define BTRFS_QUOTA_CTL_RESCAN 3
+#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;
};
+struct btrfs_ioctl_quota_rescan_args {
+ __u64 flags;
+ __u64 progress;
+ __u64 reserved[6];
+};
+
struct btrfs_ioctl_qgroup_assign_args {
__u64 assign;
__u64 src;
* search of clone sources doesn't find an extent. UPDATE_EXTENT
* commands will be sent instead of WRITE commands.
*/
-#define BTRFS_SEND_FLAG_NO_FILE_DATA 0x1
+#define BTRFS_SEND_FLAG_NO_FILE_DATA 0x1
+
+/*
+ * Do not add the leading stream header. Used when multiple snapshots
+ * are sent back to back.
+ */
+#define BTRFS_SEND_FLAG_OMIT_STREAM_HEADER 0x2
+
+/*
+ * Omit the command at the end of the stream that indicates the end
+ * of the stream. This option is used when multiple snapshots are
+ * sent back to back.
+ */
+#define BTRFS_SEND_FLAG_OMIT_END_CMD 0x4
+
+#define BTRFS_SEND_FLAG_MASK \
+ (BTRFS_SEND_FLAG_NO_FILE_DATA | \
+ BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \
+ BTRFS_SEND_FLAG_OMIT_END_CMD)
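Taken together, the two new flags let user space concatenate several send streams onto one file descriptor with a single header and a single end command. A hypothetical sketch follows; it assumes the BTRFS_IOC_SEND ioctl and the flags/send_fd members of struct btrfs_ioctl_send_args from the full header, plus <sys/ioctl.h> and <errno.h>:

	static int example_send_snapshots(int pipe_fd, const int *snap_fd, int nr_snaps)
	{
		int i;

		for (i = 0; i < nr_snaps; i++) {
			struct btrfs_ioctl_send_args args = { .send_fd = pipe_fd };

			/* Only the first stream carries the header... */
			if (i > 0)
				args.flags |= BTRFS_SEND_FLAG_OMIT_STREAM_HEADER;
			/* ...and only the last one carries the end command. */
			if (i < nr_snaps - 1)
				args.flags |= BTRFS_SEND_FLAG_OMIT_END_CMD;

			if (ioctl(snap_fd[i], BTRFS_IOC_SEND, &args) < 0)
				return -errno;
		}
		return 0;
	}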
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
struct btrfs_ioctl_qgroup_create_args)
#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
struct btrfs_ioctl_qgroup_limit_args)
+#define BTRFS_IOC_QUOTA_RESCAN _IOW(BTRFS_IOCTL_MAGIC, 44, \
+ struct btrfs_ioctl_quota_rescan_args)
+#define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \
+ struct btrfs_ioctl_quota_rescan_args)
#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
char[BTRFS_LABEL_SIZE])
#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
*/
/* some useful defines for sb1000.c and cmconfig.c - fv */
-#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
-#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
-#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
-#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
-#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
-#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
+#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
+#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
+#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
+#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
+#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
+#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
#endif
struct sem_queue * q;
semncnt = 0;
+ list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
+ struct sembuf * sops = q->sops;
+ BUG_ON(sops->sem_num != semnum);
+ if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
+ semncnt++;
+ }
+
list_for_each_entry(q, &sma->sem_pending, list) {
struct sembuf * sops = q->sops;
int nsops = q->nsops;
struct sem_queue * q;
semzcnt = 0;
+ list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
+ struct sembuf * sops = q->sops;
+ BUG_ON(sops->sem_num != semnum);
+ if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
+ semzcnt++;
+ }
+
list_for_each_entry(q, &sma->sem_pending, list) {
struct sembuf * sops = q->sops;
int nsops = q->nsops;
if (shmflg & SHM_HUGETLB) {
struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
& SHM_HUGE_MASK);
- size_t hugesize = ALIGN(size, huge_page_size(hs));
+ size_t hugesize;
+
+ if (!hs) {
+ error = -EINVAL;
+ goto no_file;
+ }
+ hugesize = ALIGN(size, huge_page_size(hs));
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
len = ALIGN(len, huge_page_size(hstate_file(file)));
} else if (flags & MAP_HUGETLB) {
struct user_struct *user = NULL;
+ struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
+ SHM_HUGE_MASK);
- len = ALIGN(len, huge_page_size(hstate_sizelog(
- (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)));
+ if (!hs)
+ return -EINVAL;
+
+ len = ALIGN(len, huge_page_size(hs));
/*
* VM_NORESERVE is used because the reservations will be
* taken when vm_ops->mmap() is called
__be16 type = skb->protocol;
int vlan_depth = ETH_HLEN;
+ /* Tunnel gso handlers can set protocol to ethernet. */
+ if (type == htons(ETH_P_TEB)) {
+ struct ethhdr *eth;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+ return 0;
+
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ type = eth->h_proto;
+ }
+
while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
struct vlan_hdr *vh;
csum = false;
/* setup inner skb. */
- if (greh->protocol == htons(ETH_P_TEB)) {
- struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
- skb->protocol = eth->h_proto;
- } else {
- skb->protocol = greh->protocol;
- }
-
+ skb->protocol = greh->protocol;
skb->encapsulation = 0;
if (unlikely(!pskb_may_pull(skb, ghl)))
struct sk_buff *segs = ERR_PTR(-EINVAL);
int mac_len = skb->mac_len;
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
- struct ethhdr *inner_eth = (struct ethhdr *)skb_inner_mac_header(skb);
__be16 protocol = skb->protocol;
netdev_features_t enc_features;
int outer_hlen;
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
skb->mac_len = skb_inner_network_offset(skb);
- inner_eth = (struct ethhdr *)skb_mac_header(skb);
- skb->protocol = inner_eth->h_proto;
+ skb->protocol = htons(ETH_P_TEB);
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
err = -EINVAL;
gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
if (!gss_auth->mech) {
- printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
- __func__, flavor);
+ dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
goto err_free;
}
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
auth = rpcauth_create(args->authflavor, clnt);
if (IS_ERR(auth)) {
- printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
+ dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
args->authflavor);
err = PTR_ERR(auth);
goto out_no_auth;